| entry_point | original_triton_python_code | optimised_triton_code | repo_name | module_name | synthetic | uuid | licenses | stars | sha | repo_link |
|---|---|---|---|---|---|---|---|---|---|---|
Attention
|
import math
import torch
import torch.nn as nn
class Attention(nn.Module):
def __init__(self, embedding_size, num_attention_heads,
attention_dropout, residual_dropout):
super(Attention, self).__init__()
self.num_attention_heads = num_attention_heads
self.size_per_head = embedding_size // num_attention_heads
self.embedding_size = embedding_size
self.query_key_value = nn.Linear(embedding_size, embedding_size * 3)
self.attn_drop = nn.Dropout(attention_dropout)
self.resid_drop = nn.Dropout(residual_dropout)
self.dense = nn.Linear(embedding_size, embedding_size)
def split_heads(self, x):
x = x.reshape([-1, self.seq_len, self.num_attention_heads, self.
size_per_head])
return x.permute(0, 2, 1, 3)
def forward(self, x, kv_cache=None):
self.seq_len = x.shape[1]
x = self.query_key_value(x)
q, k, v = torch.split(x, split_size_or_sections=self.embedding_size,
dim=2)
q = self.split_heads(q)
k = self.split_heads(k)
v = self.split_heads(v)
if kv_cache is not None:
pk, pv = kv_cache[0], kv_cache[1]
k = torch.cat([pk, k], dim=-2)
v = torch.cat([pv, v], dim=-2)
cached_kv = torch.stack([k, v])
attn = torch.matmul(q, k.transpose(-1, -2))
attn = attn / math.sqrt(self.size_per_head)
attention_mask = torch.tril(torch.ones(self.seq_len, self.seq_len,
dtype=torch.float32, device=x.device))
attention_mask = attention_mask.reshape([1, 1, self.seq_len, self.
seq_len])
attn = attn * attention_mask - 10000.0 * (1.0 - attention_mask)
attn = nn.Softmax(dim=-1)(attn)
attn = self.attn_drop(attn)
y = torch.matmul(attn, v)
y = y.permute(0, 2, 1, 3)
y = torch.reshape(y, [-1, self.seq_len, self.embedding_size])
y = self.resid_drop(self.dense(y))
return y, cached_kv
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'embedding_size': 4, 'num_attention_heads': 4,
'attention_dropout': 0.5, 'residual_dropout': 0.5}]
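# Illustrative only (not from the source repo): the get_inputs / get_init_inputs
# helpers above follow the harness convention used throughout this dataset; a
# minimal eager-mode driver, with dropout disabled via eval(), looks like this.
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    model = Attention(*init_args, **init_kwargs).eval()
    out, cached_kv = model(*get_inputs())
    print(out.shape, cached_kv.shape)  # torch.Size([4, 4, 4]), torch.Size([2, 4, 4, 4, 1])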
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x0 = xindex % 4
x1 = xindex // 4 % 4
x3 = xindex
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 + x0 + 12 * x1 + 48 * x2), tmp4 & xmask,
other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr0 + (8 + x0 + 12 * x1 + 48 * (-4 + x2)), tmp6 &
xmask, other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (4 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_tril_3(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = x0 + -1 * x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 <= tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_div_mul_rsub_sub_4(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp23 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp33 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = -1 * x0
tmp4 = tl.full([1], 0, tl.int64)
tmp5 = tmp3 <= tmp4
tmp6 = 0.0
tmp7 = tl.where(tmp5, tmp1, tmp6)
tmp8 = tmp2 * tmp7
tmp9 = tmp1 - tmp7
tmp10 = 10000.0
tmp11 = tmp9 * tmp10
tmp12 = tmp8 - tmp11
tmp14 = tmp13 * tmp1
tmp15 = 1 + -1 * x0
tmp16 = tmp15 <= tmp4
tmp17 = tl.where(tmp16, tmp1, tmp6)
tmp18 = tmp14 * tmp17
tmp19 = tmp1 - tmp17
tmp20 = tmp19 * tmp10
tmp21 = tmp18 - tmp20
tmp22 = triton_helpers.maximum(tmp12, tmp21)
tmp24 = tmp23 * tmp1
tmp25 = 2 + -1 * x0
tmp26 = tmp25 <= tmp4
tmp27 = tl.where(tmp26, tmp1, tmp6)
tmp28 = tmp24 * tmp27
tmp29 = tmp1 - tmp27
tmp30 = tmp29 * tmp10
tmp31 = tmp28 - tmp30
tmp32 = triton_helpers.maximum(tmp22, tmp31)
tmp34 = tmp33 * tmp1
tmp35 = 3 + -1 * x0
tmp36 = tmp35 <= tmp4
tmp37 = tl.where(tmp36, tmp1, tmp6)
tmp38 = tmp34 * tmp37
tmp39 = tmp1 - tmp37
tmp40 = tmp39 * tmp10
tmp41 = tmp38 - tmp40
tmp42 = triton_helpers.maximum(tmp32, tmp41)
tmp43 = tmp12 - tmp42
tmp44 = tl_math.exp(tmp43)
tmp45 = tmp21 - tmp42
tmp46 = tl_math.exp(tmp45)
tmp47 = tmp44 + tmp46
tmp48 = tmp31 - tmp42
tmp49 = tl_math.exp(tmp48)
tmp50 = tmp47 + tmp49
tmp51 = tmp41 - tmp42
tmp52 = tl_math.exp(tmp51)
tmp53 = tmp50 + tmp52
tl.store(out_ptr0 + x2, tmp42, xmask)
tl.store(out_ptr1 + x2, tmp53, xmask)
@triton.jit
def triton_poi_fused__softmax_div_mul_rsub_sub_5(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x1 = xindex // 4 % 4
x4 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp13 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = x0 + -1 * x1
tmp4 = tl.full([1], 0, tl.int64)
tmp5 = tmp3 <= tmp4
tmp6 = 0.0
tmp7 = tl.where(tmp5, tmp1, tmp6)
tmp8 = tmp2 * tmp7
tmp9 = tmp1 - tmp7
tmp10 = 10000.0
tmp11 = tmp9 * tmp10
tmp12 = tmp8 - tmp11
tmp14 = tmp12 - tmp13
tmp15 = tl_math.exp(tmp14)
tmp17 = tmp15 / tmp16
tl.store(in_out_ptr0 + x3, tmp17, xmask)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (8 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (16,
4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 12), (1, 4),
0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((8, 4, 4, 1), (16, 1, 4, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_stack_0[grid(128)](buf0, buf1, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_1[grid(16, 4)](buf0, buf2, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
triton_poi_fused_clone_2[grid(16, 4)](buf0, buf3, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf3, (16, 1, 4), (4, 0, 1), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_tril_3[grid(16)](buf5, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_div_mul_rsub_sub_4[grid(64)](buf4, buf6,
buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf8 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused__softmax_div_mul_rsub_sub_5[grid(256)](buf8, buf6,
buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf7
triton_poi_fused_clone_6[grid(16, 4)](buf0, buf9, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
del buf0
buf10 = reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 1), 0)
del buf6
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_7[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0)
del buf10
extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf12)
buf13 = reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0)
del buf12
triton_poi_fused_add_8[grid(64)](buf13, primals_5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_5
return buf13, reinterpret_tensor(buf1, (2, 4, 4, 4, 1), (64, 16, 1, 4,
4), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), buf5, buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0
), primals_4, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf2, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 4), 0)
class AttentionNew(nn.Module):
def __init__(self, embedding_size, num_attention_heads,
attention_dropout, residual_dropout):
super(AttentionNew, self).__init__()
self.num_attention_heads = num_attention_heads
self.size_per_head = embedding_size // num_attention_heads
self.embedding_size = embedding_size
self.query_key_value = nn.Linear(embedding_size, embedding_size * 3)
self.attn_drop = nn.Dropout(attention_dropout)
self.resid_drop = nn.Dropout(residual_dropout)
self.dense = nn.Linear(embedding_size, embedding_size)
def split_heads(self, x):
x = x.reshape([-1, self.seq_len, self.num_attention_heads, self.
size_per_head])
return x.permute(0, 2, 1, 3)
def forward(self, input_0):
primals_2 = self.query_key_value.weight
primals_3 = self.query_key_value.bias
primals_4 = self.dense.weight
primals_5 = self.dense.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1]
| AeroXi/CPM-Generate-Pytorch | Attention | false | 8,913 | ["Apache-2.0"] | 0 | a1530ad2848a690c6e1557f996fe58538fe86884 | https://github.com/AeroXi/CPM-Generate-Pytorch/tree/a1530ad2848a690c6e1557f996fe58538fe86884 |
DiceLoss
|
import torch
from torch import nn
class DiceLoss(nn.Module):
def __init__(self):
super(DiceLoss, self).__init__()
def forward(self, pred, target, weight=None):
smooth = 1
size = pred.size(0)
pred_flat = pred.view(size, -1)
target_flat = target.view(size, -1)
intersection = pred_flat * target_flat
dice_score = (2 * intersection.sum(1) + smooth) / (pred_flat.sum(1) +
target_flat.sum(1) + smooth)
if weight is not None:
dice_score = weight * dice_score
dice_loss = weight.sum() / size - dice_score.sum() / size
else:
dice_loss = 1 - dice_score.sum() / size
return dice_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
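# Illustrative sanity check (not from the source repo): when pred equals a hard
# 0/1 target exactly, the Dice score is 1 and the loss (with weight=None) is 0.
if __name__ == '__main__':
    t = (torch.rand(4, 4, 4, 4) > 0.5).float()
    print(DiceLoss()(t, t))  # tensor(0.)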
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tl.store(out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr1 + x0, tmp10, xmask)
tl.store(out_ptr2 + x0, tmp14, xmask)
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp5 = tl.load(in_ptr1 + r0, None)
tmp6 = tl.load(in_ptr2 + r0, None)
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp2 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp7 + tmp3
tmp9 = tmp4 / tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.sum(tmp10, 1)[:, None]
tmp13 = 0.25
tmp14 = tmp12 * tmp13
tmp15 = tmp3 - tmp14
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp15, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_mul_sum_0[grid(4)](arg0_1, arg1_1, buf0, buf1,
buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_per_fused_add_div_mul_rsub_sum_1[grid(1)](buf4, buf0, buf1,
buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
del buf2
return buf4,
class DiceLossNew(nn.Module):
def __init__(self):
super(DiceLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| CityU-AIM-Group/PRR-Imbalance | DiceLoss | false | 8,914 | ["MIT"] | 0 | e893809c72697511897c9100c25f831087fc345f | https://github.com/CityU-AIM-Group/PRR-Imbalance/tree/e893809c72697511897c9100c25f831087fc345f |
HardSwish
|
import torch
import torch.nn.functional as F
from torch import nn
class HardSwish(nn.Module):
def forward(self, x):
return x * F.hardtanh(x + 3, 0.0, 6.0, True) / 6.0
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
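# Illustrative check (assumes torch >= 1.6, which provides nn.Hardswish): the
# module above computes x * relu6(x + 3) / 6, so it should match the built-in
# op to within floating-point tolerance.
if __name__ == '__main__':
    x = torch.rand(4, 4, 4, 4)
    print((HardSwish()(x) - nn.Hardswish()(x)).abs().max())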
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = tmp0 * tmp6
tmp8 = 0.16666666666666666
tmp9 = tmp7 * tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_mul_0[grid(256)](arg0_1, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class HardSwishNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Cris-zj/mmdetection | HardSwish | false | 8,915 | ["Apache-2.0"] | 0 | ede648b93e7ba2562f835f338b778f3e705f7119 | https://github.com/Cris-zj/mmdetection/tree/ede648b93e7ba2562f835f338b778f3e705f7119 |
FocalLoss
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
loss = loss * weight
if avg_factor is None:
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
loss = loss.sum() / avg_factor
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def sigmoid_focal_loss(pred, target, weight=None, gamma=2.0, alpha=0.25,
reduction='mean', avg_factor=None):
"""Sigmoid focal loss.
Args:
pred (torch.Tensor): The prediction with shape (N, \\*).
target (torch.Tensor): The ground truth label of the prediction with
shape (N, \\*).
weight (torch.Tensor, optional): Sample-wise loss weight with shape
(N, ). Defaults to None.
gamma (float): The gamma for calculating the modulating factor.
Defaults to 2.0.
alpha (float): A balanced form for Focal Loss. Defaults to 0.25.
reduction (str): The method used to reduce the loss.
Options are "none", "mean" and "sum". If reduction is 'none' ,
loss is same shape as pred and label. Defaults to 'mean'.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
Returns:
torch.Tensor: Loss.
"""
assert pred.shape == target.shape, 'pred and target should be in the same shape.'
pred_sigmoid = pred.sigmoid()
target = target.type_as(pred)
pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
focal_weight = (alpha * target + (1 - alpha) * (1 - target)) * pt.pow(gamma
)
loss = F.binary_cross_entropy_with_logits(pred, target, reduction='none'
) * focal_weight
if weight is not None:
assert weight.dim() == 1
weight = weight.float()
if pred.dim() > 1:
weight = weight.reshape(-1, 1)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
class FocalLoss(nn.Module):
"""Focal loss.
Args:
gamma (float): Focusing parameter in focal loss.
Defaults to 2.0.
alpha (float): The parameter in balanced form of focal
loss. Defaults to 0.25.
reduction (str): The method used to reduce the loss into
a scalar. Options are "none" and "mean". Defaults to 'mean'.
loss_weight (float): Weight of loss. Defaults to 1.0.
"""
def __init__(self, gamma=2.0, alpha=0.25, reduction='mean', loss_weight=1.0
):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.alpha = alpha
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self, pred, target, weight=None, avg_factor=None,
reduction_override=None):
"""Sigmoid focal loss.
Args:
pred (torch.Tensor): The prediction with shape (N, \\*).
target (torch.Tensor): The ground truth label of the prediction
with shape (N, \\*).
weight (torch.Tensor, optional): Sample-wise loss weight with shape
(N, \\*). Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The method used to reduce the
loss into a scalar. Options are "none", "mean" and "sum".
Defaults to None.
Returns:
torch.Tensor: Loss.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (reduction_override if reduction_override else self.
reduction)
loss_cls = self.loss_weight * sigmoid_focal_loss(pred, target,
weight, gamma=self.gamma, alpha=self.alpha, reduction=reduction,
avg_factor=avg_factor)
return loss_cls
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
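# Illustrative reduction check (assumption: with gamma=0 and alpha=0.5 the focal
# modulating term is a constant 0.5, so the loss is half of plain BCE with logits).
if __name__ == '__main__':
    pred, target = torch.randn(4, 4), torch.rand(4, 4)
    fl = FocalLoss(gamma=0.0, alpha=0.5)(pred, target)
    bce = F.binary_cross_entropy_with_logits(pred, target)
    print(fl, 0.5 * bce)  # the two values should agree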
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = 0.25
tmp14 = tmp0 * tmp13
tmp15 = 0.75
tmp16 = tmp2 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tl.sigmoid(tmp3)
tmp19 = tmp1 - tmp18
tmp20 = tmp19 * tmp0
tmp21 = tmp18 * tmp2
tmp22 = tmp20 + tmp21
tmp23 = tmp22 * tmp22
tmp24 = tmp17 * tmp23
tmp25 = tmp12 * tmp24
tmp26 = tl.broadcast_to(tmp25, [RBLOCK])
tmp28 = triton_helpers.promote_to_tensor(tl.sum(tmp26, 0))
tmp29 = 256.0
tmp30 = tmp28 / tmp29
tmp31 = tmp30 * tmp1
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp31, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0[
grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
loss = loss * weight
if avg_factor is None:
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
loss = loss.sum() / avg_factor
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def sigmoid_focal_loss(pred, target, weight=None, gamma=2.0, alpha=0.25,
reduction='mean', avg_factor=None):
"""Sigmoid focal loss.
Args:
pred (torch.Tensor): The prediction with shape (N, \\*).
target (torch.Tensor): The ground truth label of the prediction with
shape (N, \\*).
weight (torch.Tensor, optional): Sample-wise loss weight with shape
(N, ). Defaults to None.
gamma (float): The gamma for calculating the modulating factor.
Defaults to 2.0.
alpha (float): A balanced form for Focal Loss. Defaults to 0.25.
reduction (str): The method used to reduce the loss.
Options are "none", "mean" and "sum". If reduction is 'none' ,
loss is same shape as pred and label. Defaults to 'mean'.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
Returns:
torch.Tensor: Loss.
"""
assert pred.shape == target.shape, 'pred and target should be in the same shape.'
pred_sigmoid = pred.sigmoid()
target = target.type_as(pred)
pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
focal_weight = (alpha * target + (1 - alpha) * (1 - target)) * pt.pow(gamma
)
loss = F.binary_cross_entropy_with_logits(pred, target, reduction='none'
) * focal_weight
if weight is not None:
assert weight.dim() == 1
weight = weight.float()
if pred.dim() > 1:
weight = weight.reshape(-1, 1)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
class FocalLossNew(nn.Module):
"""Focal loss.
Args:
gamma (float): Focusing parameter in focal loss.
Defaults to 2.0.
alpha (float): The parameter in balanced form of focal
loss. Defaults to 0.25.
reduction (str): The method used to reduce the loss into
a scalar. Options are "none" and "mean". Defaults to 'mean'.
loss_weight (float): Weight of loss. Defaults to 1.0.
"""
def __init__(self, gamma=2.0, alpha=0.25, reduction='mean', loss_weight=1.0
):
super(FocalLossNew, self).__init__()
self.gamma = gamma
self.alpha = alpha
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Chrisfsj2051/my_tools | FocalLoss | false | 8,916 | ["MIT"] | 0 | 67355a46df6290aa2fdc1e0266c61daacced3ba1 | https://github.com/Chrisfsj2051/my_tools/tree/67355a46df6290aa2fdc1e0266c61daacced3ba1 |
EncoderSlot
|
import torch
from torch import nn
class EncoderSlot(nn.Module):
def __init__(self):
super().__init__()
self.conv_1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=5)
self.conv_2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=5)
self.conv_3 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=5)
self.conv_4 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=5)
def forward(self, inputs):
out = self.conv_1(inputs)
out = self.conv_2(out)
out = self.conv_3(out)
out = self.conv_4(out)
return out
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
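# Illustrative shape check (not from the source repo): each valid 5x5 conv trims
# 4 pixels in height and width, so 64 -> 60 -> 56 -> 52 -> 48, matching the
# (4, 64, 48, 48) output asserted in the generated call() below.
if __name__ == '__main__':
    print(EncoderSlot()(torch.rand(4, 1, 64, 64)).shape)  # torch.Size([4, 64, 48, 48])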
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 3600 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 3136 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 2704 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 2304 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (64, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (64, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (64, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_9, (64,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 60, 60), (230400, 3600, 60, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(921600)](buf1, primals_2,
921600, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 56, 56), (200704, 3136, 56, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(802816)](buf3, primals_5,
802816, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 52, 52), (173056, 2704, 52, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(692224)](buf5, primals_7,
692224, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 48, 48), (147456, 2304, 48, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_3[grid(589824)](buf7, primals_9,
589824, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
return (buf7, primals_1, primals_3, primals_4, primals_6, primals_8,
buf1, buf3, buf5)
class EncoderSlotNew(nn.Module):
def __init__(self):
super().__init__()
self.conv_1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=5)
self.conv_2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=5)
self.conv_3 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=5)
self.conv_4 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=5)
def forward(self, input_0):
primals_1 = self.conv_1.weight
primals_2 = self.conv_1.bias
primals_4 = self.conv_2.weight
primals_5 = self.conv_2.bias
primals_6 = self.conv_3.weight
primals_7 = self.conv_3.bias
primals_8 = self.conv_4.weight
primals_9 = self.conv_4.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| CatarauCorina/representation_learning | EncoderSlot | false | 8,917 | ["Apache-2.0"] | 0 | bb467761b03e5d8ac20c2f705f3bfdb84a7c3842 | https://github.com/CatarauCorina/representation_learning/tree/bb467761b03e5d8ac20c2f705f3bfdb84a7c3842 |
GlobalAveragePooling
|
import torch
import torch.nn as nn
class GlobalAveragePooling(nn.Module):
"""Global Average Pooling neck.
Note that we use `view` to remove extra channel after pooling. We do not
use `squeeze` as it will also remove the batch dimension when the tensor
has a batch dimension of size 1, which can lead to unexpected errors.
Args:
dim (int): Dimensions of each sample channel, can be one of {1, 2, 3}.
Default: 2
"""
def __init__(self, dim=2):
super(GlobalAveragePooling, self).__init__()
assert dim in [1, 2, 3
], f'GlobalAveragePooling dim only support {1, 2, 3}, get {dim} instead.'
if dim == 1:
self.gap = nn.AdaptiveAvgPool1d(1)
elif dim == 2:
self.gap = nn.AdaptiveAvgPool2d((1, 1))
else:
self.gap = nn.AdaptiveAvgPool3d((1, 1, 1))
def init_weights(self):
pass
def forward(self, inputs):
if isinstance(inputs, tuple):
outs = tuple([self.gap(x) for x in inputs])
outs = tuple([out.view(x.size(0), -1) for out, x in zip(outs,
inputs)])
elif isinstance(inputs, torch.Tensor):
outs = self.gap(inputs)
outs = outs.view(inputs.size(0), -1)
else:
raise TypeError('neck inputs should be tuple or torch.tensor')
return outs
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
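# Illustrative equivalence check (not from the source repo): for a 4-D tensor the
# dim=2 neck is just a mean over the spatial dims followed by a flatten.
if __name__ == '__main__':
    x = torch.rand(4, 4, 4, 4)
    print(torch.allclose(GlobalAveragePooling(dim=2)(x), x.mean(dim=(2, 3))))  # True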
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, arg0_1, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del arg0_1
return reinterpret_tensor(buf1, (4, 4), (4, 1), 0),
class GlobalAveragePoolingNew(nn.Module):
"""Global Average Pooling neck.
Note that we use `view` to remove extra channel after pooling. We do not
use `squeeze` as it will also remove the batch dimension when the tensor
has a batch dimension of size 1, which can lead to unexpected errors.
Args:
dim (int): Dimensions of each sample channel, can be one of {1, 2, 3}.
Default: 2
"""
def __init__(self, dim=2):
super(GlobalAveragePoolingNew, self).__init__()
assert dim in [1, 2, 3
], f'GlobalAveragePooling dim only support {1, 2, 3}, get {dim} instead.'
if dim == 1:
self.gap = nn.AdaptiveAvgPool1d(1)
elif dim == 2:
self.gap = nn.AdaptiveAvgPool2d((1, 1))
else:
self.gap = nn.AdaptiveAvgPool3d((1, 1, 1))
def init_weights(self):
pass
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Chrisfsj2051/my_tools | GlobalAveragePooling | false | 8,918 | ["MIT"] | 0 | 67355a46df6290aa2fdc1e0266c61daacced3ba1 | https://github.com/Chrisfsj2051/my_tools/tree/67355a46df6290aa2fdc1e0266c61daacced3ba1 |
Mish
|
import torch
import torch.nn.functional as F
from torch import nn
class Mish(nn.Module):
def forward(self, x):
return x * F.softplus(x).tanh()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
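# Illustrative check (assumes torch >= 1.9, which provides nn.Mish): the module
# above computes x * tanh(softplus(x)), so it should match the built-in op to
# within floating-point tolerance.
if __name__ == '__main__':
    x = torch.rand(4, 4, 4, 4)
    print((Mish()(x) - nn.Mish()(x)).abs().max())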
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_softplus_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 20.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = libdevice.log1p(tmp3)
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp6 = libdevice.tanh(tmp5)
tmp7 = tmp0 * tmp6
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_softplus_tanh_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class MishNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Cris-zj/mmdetection | Mish | false | 8,919 | ["Apache-2.0"] | 0 | ede648b93e7ba2562f835f338b778f3e705f7119 | https://github.com/Cris-zj/mmdetection/tree/ede648b93e7ba2562f835f338b778f3e705f7119 |
AsymmetricLoss
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
loss = loss * weight
if avg_factor is None:
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
loss = loss.sum() / avg_factor
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def asymmetric_loss(pred, target, weight=None, gamma_pos=1.0, gamma_neg=4.0,
clip=0.05, reduction='mean', avg_factor=None):
"""asymmetric loss.
Please refer to the `paper <https://arxiv.org/abs/2009.14119>`__ for
details.
Args:
pred (torch.Tensor): The prediction with shape (N, \\*).
target (torch.Tensor): The ground truth label of the prediction with
shape (N, \\*).
weight (torch.Tensor, optional): Sample-wise loss weight with shape
(N, ). Defaults to None.
gamma_pos (float): positive focusing parameter. Defaults to 0.0.
gamma_neg (float): Negative focusing parameter. We usually set
gamma_neg > gamma_pos. Defaults to 4.0.
clip (float, optional): Probability margin. Defaults to 0.05.
reduction (str): The method used to reduce the loss.
Options are "none", "mean" and "sum". If reduction is 'none' , loss
is same shape as pred and label. Defaults to 'mean'.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
Returns:
torch.Tensor: Loss.
"""
assert pred.shape == target.shape, 'pred and target should be in the same shape.'
eps = 1e-08
pred_sigmoid = pred.sigmoid()
target = target.type_as(pred)
if clip and clip > 0:
pt = (1 - pred_sigmoid + clip).clamp(max=1) * (1 - target
) + pred_sigmoid * target
else:
pt = (1 - pred_sigmoid) * (1 - target) + pred_sigmoid * target
asymmetric_weight = (1 - pt).pow(gamma_pos * target + gamma_neg * (1 -
target))
loss = -torch.log(pt.clamp(min=eps)) * asymmetric_weight
if weight is not None:
assert weight.dim() == 1
weight = weight.float()
if pred.dim() > 1:
weight = weight.reshape(-1, 1)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
class AsymmetricLoss(nn.Module):
"""asymmetric loss.
Args:
gamma_pos (float): positive focusing parameter.
Defaults to 0.0.
gamma_neg (float): Negative focusing parameter. We
usually set gamma_neg > gamma_pos. Defaults to 4.0.
clip (float, optional): Probability margin. Defaults to 0.05.
reduction (str): The method used to reduce the loss into
a scalar.
loss_weight (float): Weight of loss. Defaults to 1.0.
"""
def __init__(self, gamma_pos=0.0, gamma_neg=4.0, clip=0.05, reduction=
'mean', loss_weight=1.0):
super(AsymmetricLoss, self).__init__()
self.gamma_pos = gamma_pos
self.gamma_neg = gamma_neg
self.clip = clip
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self, pred, target, weight=None, avg_factor=None,
reduction_override=None):
"""asymmetric loss."""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (reduction_override if reduction_override else self.
reduction)
loss_cls = self.loss_weight * asymmetric_loss(pred, target, weight,
gamma_pos=self.gamma_pos, gamma_neg=self.gamma_neg, clip=self.
clip, reduction=reduction, avg_factor=avg_factor)
return loss_cls
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
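# Illustrative reduction check (assumption: clip=0 disables the probability margin
# and zero gammas remove the focusing term): for hard 0/1 targets the loss then
# collapses to ordinary BCE with logits.
if __name__ == '__main__':
    pred = torch.randn(4, 4)
    target = (torch.rand(4, 4) > 0.5).float()
    al = AsymmetricLoss(gamma_pos=0.0, gamma_neg=0.0, clip=0.0)(pred, target)
    print(al, F.binary_cross_entropy_with_logits(pred, target))  # should agree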
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_clamp_log_mean_mul_neg_pow_rsub_sigmoid_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp7 = tl.load(in_ptr1 + r0, None)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp4 = 0.05
tmp5 = tmp3 + tmp4
tmp6 = triton_helpers.minimum(tmp5, tmp2)
tmp8 = tmp2 - tmp7
tmp9 = tmp6 * tmp8
tmp10 = tmp1 * tmp7
tmp11 = tmp9 + tmp10
tmp12 = 1e-08
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp14 = tl_math.log(tmp13)
tmp15 = -tmp14
tmp16 = tmp2 - tmp11
tmp17 = 0.0
tmp18 = tmp7 * tmp17
tmp19 = 4.0
tmp20 = tmp8 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = libdevice.pow(tmp16, tmp21)
tmp23 = tmp15 * tmp22
tmp24 = tl.broadcast_to(tmp23, [RBLOCK])
tmp26 = triton_helpers.promote_to_tensor(tl.sum(tmp24, 0))
tmp27 = 256.0
tmp28 = tmp26 / tmp27
tmp29 = tmp28 * tmp2
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp29, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_clamp_log_mean_mul_neg_pow_rsub_sigmoid_0[grid(1)
](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
loss = loss * weight
if avg_factor is None:
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
loss = loss.sum() / avg_factor
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def asymmetric_loss(pred, target, weight=None, gamma_pos=1.0, gamma_neg=4.0,
clip=0.05, reduction='mean', avg_factor=None):
"""asymmetric loss.
Please refer to the `paper <https://arxiv.org/abs/2009.14119>`__ for
details.
Args:
pred (torch.Tensor): The prediction with shape (N, \\*).
target (torch.Tensor): The ground truth label of the prediction with
shape (N, \\*).
weight (torch.Tensor, optional): Sample-wise loss weight with shape
(N, ). Defaults to None.
gamma_pos (float): positive focusing parameter. Defaults to 0.0.
gamma_neg (float): Negative focusing parameter. We usually set
gamma_neg > gamma_pos. Defaults to 4.0.
clip (float, optional): Probability margin. Defaults to 0.05.
reduction (str): The method used to reduce the loss.
Options are "none", "mean" and "sum". If reduction is 'none' , loss
is same shape as pred and label. Defaults to 'mean'.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
Returns:
torch.Tensor: Loss.
"""
assert pred.shape == target.shape, 'pred and target should be in the same shape.'
eps = 1e-08
pred_sigmoid = pred.sigmoid()
target = target.type_as(pred)
if clip and clip > 0:
pt = (1 - pred_sigmoid + clip).clamp(max=1) * (1 - target
) + pred_sigmoid * target
else:
pt = (1 - pred_sigmoid) * (1 - target) + pred_sigmoid * target
asymmetric_weight = (1 - pt).pow(gamma_pos * target + gamma_neg * (1 -
target))
loss = -torch.log(pt.clamp(min=eps)) * asymmetric_weight
if weight is not None:
assert weight.dim() == 1
weight = weight.float()
if pred.dim() > 1:
weight = weight.reshape(-1, 1)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
class AsymmetricLossNew(nn.Module):
"""asymmetric loss.
Args:
gamma_pos (float): positive focusing parameter.
Defaults to 0.0.
gamma_neg (float): Negative focusing parameter. We
usually set gamma_neg > gamma_pos. Defaults to 4.0.
clip (float, optional): Probability margin. Defaults to 0.05.
reduction (str): The method used to reduce the loss into
a scalar.
loss_weight (float): Weight of loss. Defaults to 1.0.
"""
def __init__(self, gamma_pos=0.0, gamma_neg=4.0, clip=0.05, reduction=
'mean', loss_weight=1.0):
super(AsymmetricLossNew, self).__init__()
self.gamma_pos = gamma_pos
self.gamma_neg = gamma_neg
self.clip = clip
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Chrisfsj2051/my_tools | AsymmetricLoss | false | 8,920 | ["MIT"] | 0 | 67355a46df6290aa2fdc1e0266c61daacced3ba1 | https://github.com/Chrisfsj2051/my_tools/tree/67355a46df6290aa2fdc1e0266c61daacced3ba1 |
MaxPoolStride1
|
import torch
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.functional as F
import torch._utils
class MaxPoolStride1(nn.Module):
def __init__(self, kernel_size):
super(MaxPoolStride1, self).__init__()
self.kernel_size = kernel_size
self.pad = kernel_size - 1
def forward(self, x):
padding = int(self.pad / 2)
padded_x = F.pad(x, (padding, padding, padding, padding), mode=
'constant', value=0)
pooled_x = nn.MaxPool2d(self.kernel_size, 1)(padded_x)
return pooled_x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'kernel_size': 4}]
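# Illustrative note (not from the source repo): with an even kernel_size the
# symmetric padding of int((k - 1) / 2) is one pixel short, so the "stride-1"
# pool still shrinks the map; odd kernels preserve the spatial size.
if __name__ == '__main__':
    x = torch.rand(4, 4, 4, 4)
    print(MaxPoolStride1(4)(x).shape)  # torch.Size([4, 4, 3, 3])
    print(MaxPoolStride1(3)(x).shape)  # torch.Size([4, 4, 4, 4])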
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torch._utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 3 % 3
x0 = xindex % 3
x2 = xindex // 9
x4 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask,
other=0.0)
tmp12 = x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp8 & tmp13
tmp16 = tmp15 & tmp14
tmp17 = tl.load(in_ptr0 + (-4 + x0 + 4 * x1 + 16 * x2), tmp16 & xmask,
other=0.0)
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp8 & tmp20
tmp23 = tmp22 & tmp21
tmp24 = tl.load(in_ptr0 + (-3 + x0 + 4 * x1 + 16 * x2), tmp23 & xmask,
other=0.0)
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2 + x0
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp8 & tmp27
tmp30 = tmp29 & tmp28
tmp31 = tl.load(in_ptr0 + (-2 + x0 + 4 * x1 + 16 * x2), tmp30 & xmask,
other=0.0)
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = x1
tmp34 = tmp33 >= tmp1
tmp35 = tmp33 < tmp3
tmp36 = tmp34 & tmp35
tmp37 = tmp36 & tmp6
tmp38 = tmp37 & tmp7
tmp39 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1 + 16 * x2), tmp38 & xmask,
other=0.0)
tmp40 = triton_helpers.maximum(tmp39, tmp32)
tmp41 = tmp36 & tmp13
tmp42 = tmp41 & tmp14
tmp43 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp42 & xmask, other=0.0
)
tmp44 = triton_helpers.maximum(tmp43, tmp40)
tmp45 = tmp36 & tmp20
tmp46 = tmp45 & tmp21
tmp47 = tl.load(in_ptr0 + (1 + x0 + 4 * x1 + 16 * x2), tmp46 & xmask,
other=0.0)
tmp48 = triton_helpers.maximum(tmp47, tmp44)
tmp49 = tmp36 & tmp27
tmp50 = tmp49 & tmp28
tmp51 = tl.load(in_ptr0 + (2 + x0 + 4 * x1 + 16 * x2), tmp50 & xmask,
other=0.0)
tmp52 = triton_helpers.maximum(tmp51, tmp48)
tmp53 = 1 + x1
tmp54 = tmp53 >= tmp1
tmp55 = tmp53 < tmp3
tmp56 = tmp54 & tmp55
tmp57 = tmp56 & tmp6
tmp58 = tmp57 & tmp7
tmp59 = tl.load(in_ptr0 + (3 + x0 + 4 * x1 + 16 * x2), tmp58 & xmask,
other=0.0)
tmp60 = triton_helpers.maximum(tmp59, tmp52)
tmp61 = tmp56 & tmp13
tmp62 = tmp61 & tmp14
tmp63 = tl.load(in_ptr0 + (4 + x0 + 4 * x1 + 16 * x2), tmp62 & xmask,
other=0.0)
tmp64 = triton_helpers.maximum(tmp63, tmp60)
tmp65 = tmp56 & tmp20
tmp66 = tmp65 & tmp21
tmp67 = tl.load(in_ptr0 + (5 + x0 + 4 * x1 + 16 * x2), tmp66 & xmask,
other=0.0)
tmp68 = triton_helpers.maximum(tmp67, tmp64)
tmp69 = tmp56 & tmp27
tmp70 = tmp69 & tmp28
tmp71 = tl.load(in_ptr0 + (6 + x0 + 4 * x1 + 16 * x2), tmp70 & xmask,
other=0.0)
tmp72 = triton_helpers.maximum(tmp71, tmp68)
tmp73 = 2 + x1
tmp74 = tmp73 >= tmp1
tmp75 = tmp73 < tmp3
tmp76 = tmp74 & tmp75
tmp77 = tmp76 & tmp6
tmp78 = tmp77 & tmp7
tmp79 = tl.load(in_ptr0 + (7 + x0 + 4 * x1 + 16 * x2), tmp78 & xmask,
other=0.0)
tmp80 = triton_helpers.maximum(tmp79, tmp72)
tmp81 = tmp76 & tmp13
tmp82 = tmp81 & tmp14
tmp83 = tl.load(in_ptr0 + (8 + x0 + 4 * x1 + 16 * x2), tmp82 & xmask,
other=0.0)
tmp84 = triton_helpers.maximum(tmp83, tmp80)
tmp85 = tmp76 & tmp20
tmp86 = tmp85 & tmp21
tmp87 = tl.load(in_ptr0 + (9 + x0 + 4 * x1 + 16 * x2), tmp86 & xmask,
other=0.0)
tmp88 = triton_helpers.maximum(tmp87, tmp84)
tmp89 = tmp76 & tmp27
tmp90 = tmp89 & tmp28
tmp91 = tl.load(in_ptr0 + (10 + x0 + 4 * x1 + 16 * x2), tmp90 & xmask,
other=0.0)
tmp92 = triton_helpers.maximum(tmp91, tmp88)
tl.store(out_ptr0 + x4, tmp92, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0[grid(144)](arg0_1, buf0,
144, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class MaxPoolStride1New(nn.Module):
def __init__(self, kernel_size):
super(MaxPoolStride1New, self).__init__()
self.kernel_size = kernel_size
self.pad = kernel_size - 1
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| AutoRaider/AlphaPose | MaxPoolStride1 | false | 8,921 | ["Apache-2.0"] | 0 | bf74882728901b033d45512b402c32277bf9246b | https://github.com/AutoRaider/AlphaPose/tree/bf74882728901b033d45512b402c32277bf9246b |
Actor
|
import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
def hidden_init(layer):
fan_in = layer.weight.data.size()[0]
lim = 1.0 / np.sqrt(fan_in)
return -lim, lim
class Actor(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed, fc1_units=400,
fc2_units=300):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fc1_units (int): Number of nodes in first hidden layer
fc2_units (int): Number of nodes in second hidden layer
"""
super(Actor, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc1_units)
self.fc2 = nn.Linear(fc1_units, fc2_units)
self.fc3 = nn.Linear(fc2_units, action_size)
self.reset_parameters()
def reset_parameters(self):
self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(-0.003, 0.003)
def forward(self, state):
"""Build an actor (policy) network that maps states -> actions."""
x = F.relu(self.fc1(state))
x = F.relu(self.fc2(x))
        return torch.tanh(self.fc3(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
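# --- Hedged usage sketch (editor addition, not from the source repo) ---
# Driving the eager Actor above with the shapes that get_inputs() /
# get_init_inputs() describe. The tanh head keeps every action component in
# [-1, 1]; hidden sizes 400 and 300 are the defaults from __init__.
def _actor_example():
    actor = Actor(state_size=4, action_size=4, seed=4)
    state = torch.rand(4, 4, 4, 4)
    actions = actor(state)
    assert actions.shape == (4, 4, 4, 4)
    assert float(actions.abs().max()) <= 1.0
    return actions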
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 400
x2 = xindex % 1600
x3 = xindex // 1600
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 300
x2 = xindex // 1200
x3 = xindex % 1200
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3 + 1216 * x2), tmp4, xmask)
tl.store(out_ptr1 + (x3 + 1280 * x2), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 300
x1 = xindex // 300
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 300 * (x1 % 4) + 1216 * (x1 // 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_tanh_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (400, 4), (4, 1))
assert_size_stride(primals_2, (400,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (300, 400), (400, 1))
assert_size_stride(primals_5, (300,), (1,))
assert_size_stride(primals_6, (4, 300), (300, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 400), (6400, 1600, 400, 1), 0
)
del buf0
buf8 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(25600)](buf1,
primals_2, buf8, 25600, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 400), (400, 1), 0),
reinterpret_tensor(primals_4, (400, 300), (1, 400), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1),
torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(19200)](buf2,
primals_5, buf3, buf7, 19200, XBLOCK=128, num_warps=4, num_stages=1
)
del primals_5
buf4 = buf2
del buf2
triton_poi_fused_relu_view_2[grid(19200)](buf3, buf4, 19200, XBLOCK
=256, num_warps=4, num_stages=1)
del buf3
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(buf4, reinterpret_tensor(primals_6, (300, 4), (1,
300), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_tanh_3[grid(256)](buf6, primals_7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_7
return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 400), (400, 1), 0
), buf4, buf6, primals_6, buf7, primals_4, buf8
def hidden_init(layer):
fan_in = layer.weight.data.size()[0]
lim = 1.0 / np.sqrt(fan_in)
return -lim, lim
class ActorNew(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed, fc1_units=400,
fc2_units=300):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fc1_units (int): Number of nodes in first hidden layer
fc2_units (int): Number of nodes in second hidden layer
"""
super(ActorNew, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc1_units)
self.fc2 = nn.Linear(fc1_units, fc2_units)
self.fc3 = nn.Linear(fc2_units, action_size)
self.reset_parameters()
def reset_parameters(self):
self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(-0.003, 0.003)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
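# --- Hedged parity sketch (editor addition, not from the source repo) ---
# ActorNew routes the same Linear parameters through the inductor-generated
# `call` above instead of eager relu/tanh ops. Assuming a CUDA device, it can
# be compared against an eager re-evaluation built from its own layers; the
# two paths should agree to float32 matmul tolerance.
def _actor_parity_check():
    if not torch.cuda.is_available():
        return None
    compiled = ActorNew(state_size=4, action_size=4, seed=4).cuda()
    state = torch.rand(4, 4, 4, 4, device='cuda')
    out = compiled(state)
    hidden = torch.relu(compiled.fc2(torch.relu(compiled.fc1(state))))
    expected = torch.tanh(compiled.fc3(hidden))
    assert torch.allclose(out, expected, atol=1e-4)
    return out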
|
CCThompson82/deep-reinforcement-learning
|
Actor
| false
| 8,922
|
[
"MIT"
] | 0
|
f93faf0fb2b2dd8cfafeb8a4480e5520cefe6cb2
|
https://github.com/CCThompson82/deep-reinforcement-learning/tree/f93faf0fb2b2dd8cfafeb8a4480e5520cefe6cb2
|
RFDB
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def activation(act_type, inplace=True, neg_slope=0.05, n_prelu=1):
act_type = act_type.lower()
if act_type == 'relu':
layer = nn.ReLU(inplace)
elif act_type == 'lrelu':
layer = nn.LeakyReLU(neg_slope, False)
elif act_type == 'prelu':
layer = nn.PReLU(num_parameters=n_prelu, init=neg_slope)
else:
raise NotImplementedError('activation layer [{:s}] is not found'.
format(act_type))
return layer
def conv_layer(in_channels, out_channels, kernel_size, stride=1, dilation=1,
groups=1):
padding = int((kernel_size - 1) / 2) * dilation
return nn.Conv2d(in_channels, out_channels, kernel_size, stride,
padding=padding, bias=True, dilation=dilation, groups=groups)
class ESA(nn.Module):
def __init__(self, n_feats, conv):
super(ESA, self).__init__()
f = n_feats // 4
self.conv1 = conv(n_feats, f, kernel_size=1)
self.conv_f = conv(f, f, kernel_size=1)
self.conv_max = conv(f, f, kernel_size=3, padding=1)
self.conv2 = conv(f, f, kernel_size=3, stride=2, padding=0)
self.conv3 = conv(f, f, kernel_size=3, padding=1)
self.conv3_ = conv(f, f, kernel_size=3, padding=1)
self.conv4 = conv(f, n_feats, kernel_size=1)
self.sigmoid = nn.Sigmoid()
self.relu = nn.ReLU(inplace=True)
self.skip_add = torch.nn.quantized.FloatFunctional()
def forward(self, x):
c1_ = self.conv1(x)
c1 = self.conv2(c1_)
v_max = F.max_pool2d(c1, kernel_size=7, stride=3)
v_range = self.relu(self.conv_max(v_max))
c3 = self.relu(self.conv3(v_range))
c3 = self.conv3_(c3)
c3 = F.interpolate(c3, (x.size(2), x.size(3)), mode='bilinear',
align_corners=False)
cf = self.conv_f(c1_)
c4 = self.conv4(self.skip_add.add(c3, cf))
m = self.sigmoid(c4)
return self.skip_add.mul(x, m)
class RFDB(nn.Module):
def __init__(self, in_channels, distillation_rate=0.25):
super(RFDB, self).__init__()
self.dc = self.distilled_channels = in_channels // 2
self.rc = self.remaining_channels = in_channels
self.c1_d = conv_layer(in_channels, self.dc, 1)
self.c1_r = conv_layer(in_channels, self.rc, 3)
self.c2_d = conv_layer(self.remaining_channels, self.dc, 1)
self.c2_r = conv_layer(self.remaining_channels, self.rc, 3)
self.c3_d = conv_layer(self.remaining_channels, self.dc, 1)
self.c3_r = conv_layer(self.remaining_channels, self.rc, 3)
self.c4 = conv_layer(self.remaining_channels, self.dc, 3)
self.act = activation('lrelu', neg_slope=0.05)
self.c5 = conv_layer(self.dc * 4, in_channels, 1)
self.esa = ESA(in_channels, nn.Conv2d)
self.skip_add = torch.nn.quantized.FloatFunctional()
def forward(self, input):
distilled_c1 = self.act(self.c1_d(input))
r_c1 = self.c1_r(input)
r_c1 = self.act(self.skip_add.add(r_c1, input))
distilled_c2 = self.act(self.c2_d(r_c1))
r_c2 = self.c2_r(r_c1)
r_c2 = self.act(self.skip_add.add(r_c2, r_c1))
distilled_c3 = self.act(self.c3_d(r_c2))
r_c3 = self.c3_r(r_c2)
r_c3 = self.act(self.skip_add.add(r_c3, r_c2))
r_c4 = self.act(self.c4(r_c3))
out = torch.cat([distilled_c1, distilled_c2, distilled_c3, r_c4], dim=1
)
out_fused = self.esa(self.c5(out))
return out_fused
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [[], {'in_channels': 4}]
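# --- Hedged usage sketch (editor addition, not from the source repo) ---
# RFDB is shape-preserving: c5 maps the concatenated 4*dc distilled channels
# back to in_channels, and ESA upsamples its attention map to the input's
# spatial size before the final multiply. A minimal eager check on the shapes
# advertised by get_inputs() / get_init_inputs():
def _rfdb_example():
    block = RFDB(in_channels=4)
    x = torch.rand(4, 4, 64, 64)
    y = block(x)
    assert y.shape == x.shape
    return y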
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 2
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 4
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x3, None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 > tmp5
tmp7 = 0.05
tmp8 = tmp4 * tmp7
tmp9 = tl.where(tmp6, tmp4, tmp8)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp9, None)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 8
x0 = xindex % 4096
x2 = xindex // 32768
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 8192 * x2), tmp4, other=0.0).to(
tl.int1)
tmp6 = tl.load(in_ptr1 + (x0 + 4096 * x1 + 8192 * x2), tmp4, other=0.0)
tmp7 = tl.load(in_ptr2 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.05
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tl.full([1], 4, tl.int64)
tmp16 = tmp0 < tmp15
tmp17 = tmp14 & tmp16
tmp18 = tl.load(in_ptr3 + (x0 + 4096 * (-2 + x1) + 8192 * x2), tmp17,
other=0.0).to(tl.int1)
tmp19 = tl.load(in_ptr4 + (x0 + 4096 * (-2 + x1) + 8192 * x2), tmp17,
other=0.0)
tmp20 = tl.load(in_ptr5 + (-2 + x1), tmp17, eviction_policy=
'evict_last', other=0.0)
tmp21 = tmp19 + tmp20
tmp22 = tmp21 * tmp9
tmp23 = tl.where(tmp18, tmp21, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp17, tmp23, tmp24)
tmp26 = tmp0 >= tmp15
tmp27 = tl.full([1], 6, tl.int64)
tmp28 = tmp0 < tmp27
tmp29 = tmp26 & tmp28
tmp30 = tl.load(in_ptr6 + (x0 + 4096 * (-4 + x1) + 8192 * x2), tmp29,
other=0.0).to(tl.int1)
tmp31 = tl.load(in_ptr7 + (x0 + 4096 * (-4 + x1) + 8192 * x2), tmp29,
other=0.0)
tmp32 = tl.load(in_ptr8 + (-4 + x1), tmp29, eviction_policy=
'evict_last', other=0.0)
tmp33 = tmp31 + tmp32
tmp34 = tmp33 * tmp9
tmp35 = tl.where(tmp30, tmp33, tmp34)
tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype)
tmp37 = tl.where(tmp29, tmp35, tmp36)
tmp38 = tmp0 >= tmp27
tl.full([1], 8, tl.int64)
tmp41 = tl.load(in_ptr9 + (x0 + 4096 * (-6 + x1) + 8192 * x2), tmp38,
other=0.0).to(tl.int1)
tmp42 = tl.load(in_ptr10 + (x0 + 4096 * (-6 + x1) + 8192 * x2), tmp38,
other=0.0)
tmp43 = tl.load(in_ptr11 + (-6 + x1), tmp38, eviction_policy=
'evict_last', other=0.0)
tmp44 = tmp42 + tmp43
tmp45 = tmp44 * tmp9
tmp46 = tl.where(tmp41, tmp44, tmp45)
tmp47 = tl.full(tmp46.shape, 0.0, tmp46.dtype)
tmp48 = tl.where(tmp38, tmp46, tmp47)
tmp49 = tl.where(tmp29, tmp37, tmp48)
tmp50 = tl.where(tmp17, tmp25, tmp49)
tmp51 = tl.where(tmp4, tmp13, tmp50)
tl.store(out_ptr0 + x3, tmp51, None)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 4
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, None)
@triton.jit
def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 3844
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 324
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tl.store(in_out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused__to_copy_7(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.140625
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tl.store(out_ptr0 + x0, tmp9, xmask)
@triton.jit
def triton_poi_fused_add_clamp_8(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.140625
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 8, tl.int64)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_9(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.140625
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 - tmp10
tmp12 = triton_helpers.maximum(tmp11, tmp7)
tmp13 = 1.0
tmp14 = triton_helpers.minimum(tmp12, tmp13)
tl.store(out_ptr0 + x0, tmp14, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_sub_10(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7,
in_ptr8, in_ptr9, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64 % 64
x0 = xindex % 64
x2 = xindex // 4096
x3 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + 0)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp13 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last')
tmp38 = tl.load(in_ptr8 + x3, None)
tmp39 = tl.load(in_ptr9 + 0)
tmp40 = tl.broadcast_to(tmp39, [XBLOCK])
tmp1 = tl.full([XBLOCK], 9, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 9 * tmp4 + 81 * x2), None,
eviction_policy='evict_last')
tmp12 = tmp9 + tmp11
tmp14 = tmp13 + tmp1
tmp15 = tmp13 < 0
tmp16 = tl.where(tmp15, tmp14, tmp13)
tmp17 = tl.load(in_ptr2 + (tmp16 + 9 * tmp4 + 81 * x2), None,
eviction_policy='evict_last')
tmp18 = tmp17 + tmp11
tmp19 = tmp18 - tmp12
tmp21 = tmp19 * tmp20
tmp22 = tmp12 + tmp21
tmp24 = tmp23 + tmp1
tmp25 = tmp23 < 0
tmp26 = tl.where(tmp25, tmp24, tmp23)
tmp27 = tl.load(in_ptr2 + (tmp8 + 9 * tmp26 + 81 * x2), None,
eviction_policy='evict_last')
tmp28 = tmp27 + tmp11
tmp29 = tl.load(in_ptr2 + (tmp16 + 9 * tmp26 + 81 * x2), None,
eviction_policy='evict_last')
tmp30 = tmp29 + tmp11
tmp31 = tmp30 - tmp28
tmp32 = tmp31 * tmp20
tmp33 = tmp28 + tmp32
tmp34 = tmp33 - tmp22
tmp36 = tmp34 * tmp35
tmp37 = tmp22 + tmp36
tmp41 = tmp38 + tmp40
tmp42 = tmp37 + tmp41
tl.store(in_out_ptr0 + x3, tmp42, None)
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_11(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 4
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, None)
tmp2 = tmp0 + tmp1
tmp4 = tl.sigmoid(tmp2)
tmp5 = tmp3 * tmp4
tl.store(in_out_ptr0 + x3, tmp2, None)
tl.store(out_ptr0 + x3, tmp5, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31) = args
args.clear()
assert_size_stride(primals_1, (2, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (2,), (1,))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (2, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (2,), (1,))
assert_size_stride(primals_8, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (2, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_11, (2,), (1,))
assert_size_stride(primals_12, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (2, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_15, (2,), (1,))
assert_size_stride(primals_16, (4, 8, 1, 1), (8, 1, 1, 1))
assert_size_stride(primals_17, (4,), (1,))
assert_size_stride(primals_18, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_19, (1,), (1,))
assert_size_stride(primals_20, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_21, (1,), (1,))
assert_size_stride(primals_22, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_23, (1,), (1,))
assert_size_stride(primals_24, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_25, (1,), (1,))
assert_size_stride(primals_26, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_27, (1,), (1,))
assert_size_stride(primals_28, (1, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_29, (1,), (1,))
assert_size_stride(primals_30, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_31, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 2, 64, 64), (8192, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 2, 64, 64), (8192, 4096, 64, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(32768)](buf0,
primals_2, buf1, 32768, XBLOCK=128, num_warps=4, num_stages=1)
buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf3 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1),
torch.bool)
buf4 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1),
torch.float32)
triton_poi_fused_add_convolution_leaky_relu_1[grid(65536)](buf2,
primals_5, primals_3, buf3, buf4, 65536, XBLOCK=512, num_warps=
4, num_stages=1)
del primals_5
buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 2, 64, 64), (8192, 4096, 64, 1))
buf6 = empty_strided_cuda((4, 2, 64, 64), (8192, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_0[grid(32768)](buf5,
primals_7, buf6, 32768, XBLOCK=128, num_warps=4, num_stages=1)
buf7 = extern_kernels.convolution(buf4, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf8 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1),
torch.bool)
buf9 = buf2
del buf2
triton_poi_fused_add_convolution_leaky_relu_1[grid(65536)](buf7,
primals_9, buf4, buf8, buf9, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del primals_9
buf10 = extern_kernels.convolution(buf9, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 2, 64, 64), (8192, 4096, 64, 1))
buf11 = empty_strided_cuda((4, 2, 64, 64), (8192, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_0[grid(32768)](buf10,
primals_11, buf11, 32768, XBLOCK=128, num_warps=4, num_stages=1)
buf12 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf13 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1),
torch.bool)
buf14 = buf7
del buf7
triton_poi_fused_add_convolution_leaky_relu_1[grid(65536)](buf12,
primals_13, buf9, buf13, buf14, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del primals_13
buf15 = extern_kernels.convolution(buf14, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 2, 64, 64), (8192, 4096, 64, 1))
buf16 = empty_strided_cuda((4, 2, 64, 64), (8192, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_0[grid(32768)](buf15,
primals_15, buf16, 32768, XBLOCK=128, num_warps=4, num_stages=1)
buf17 = empty_strided_cuda((4, 8, 64, 64), (32768, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_2[grid(131072)](buf1, buf0, primals_2, buf6,
buf5, primals_7, buf11, buf10, primals_11, buf16, buf15,
primals_15, buf17, 131072, XBLOCK=512, num_warps=8, num_stages=1)
del buf0
del buf10
del buf15
del buf5
del primals_11
del primals_15
del primals_2
del primals_7
buf18 = extern_kernels.convolution(buf17, primals_16, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf19 = buf18
del buf18
triton_poi_fused_convolution_3[grid(65536)](buf19, primals_17,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_17
buf20 = extern_kernels.convolution(buf19, primals_18, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf21 = buf20
del buf20
triton_poi_fused_convolution_4[grid(16384)](buf21, primals_19,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_19
buf22 = extern_kernels.convolution(buf21, primals_20, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 1, 31, 31), (961, 961, 31, 1))
buf23 = buf22
del buf22
triton_poi_fused_convolution_5[grid(3844)](buf23, primals_21, 3844,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_21
buf24 = torch.ops.aten.max_pool2d_with_indices.default(buf23, [7, 7
], [3, 3])
buf25 = buf24[0]
buf26 = buf24[1]
del buf24
buf27 = extern_kernels.convolution(buf25, primals_22, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf27, (4, 1, 9, 9), (81, 81, 9, 1))
buf28 = buf27
del buf27
triton_poi_fused_convolution_relu_6[grid(324)](buf28, primals_23,
324, XBLOCK=128, num_warps=4, num_stages=1)
del primals_23
buf29 = extern_kernels.convolution(buf28, primals_24, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf29, (4, 1, 9, 9), (81, 81, 9, 1))
buf30 = buf29
del buf29
triton_poi_fused_convolution_relu_6[grid(324)](buf30, primals_25,
324, XBLOCK=128, num_warps=4, num_stages=1)
del primals_25
buf31 = extern_kernels.convolution(buf30, primals_26, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf31, (4, 1, 9, 9), (81, 81, 9, 1))
buf32 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_7[grid(64)](buf32, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf33 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_8[grid(64)](buf33, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf34 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused__to_copy_7[grid(64)](buf34, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf35 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused_add_clamp_8[grid(64)](buf35, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf36 = empty_strided_cuda((64,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_9[grid(64)](buf36,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf38 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_9[grid(64)](buf38,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf40 = extern_kernels.convolution(buf21, primals_28, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf40, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf39 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1),
torch.float32)
buf41 = reinterpret_tensor(buf39, (4, 1, 64, 64), (4096, 4096, 64,
1), 0)
del buf39
triton_poi_fused__unsafe_index_add_convolution_mul_sub_10[grid(16384)](
buf41, buf32, buf34, buf31, primals_27, buf35, buf36, buf33,
buf38, buf40, primals_29, 16384, XBLOCK=256, num_warps=4,
num_stages=1)
del buf31
del buf40
del primals_27
del primals_29
buf42 = extern_kernels.convolution(buf41, primals_30, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf42, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf43 = buf42
del buf42
buf44 = buf12
del buf12
triton_poi_fused_convolution_mul_sigmoid_11[grid(65536)](buf43,
primals_31, buf19, buf44, 65536, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_31
return (buf44, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, primals_16, primals_18,
primals_20, primals_22, primals_24, primals_26, primals_28,
primals_30, buf1, buf3, buf4, buf6, buf8, buf9, buf11, buf13, buf14,
buf16, buf17, buf19, buf21, buf23, buf25, buf26, buf28, buf30,
buf32, buf33, buf34, buf35, buf36, buf38, buf41, buf43)
def activation(act_type, inplace=True, neg_slope=0.05, n_prelu=1):
act_type = act_type.lower()
if act_type == 'relu':
layer = nn.ReLU(inplace)
elif act_type == 'lrelu':
layer = nn.LeakyReLU(neg_slope, False)
elif act_type == 'prelu':
layer = nn.PReLU(num_parameters=n_prelu, init=neg_slope)
else:
raise NotImplementedError('activation layer [{:s}] is not found'.
format(act_type))
return layer
def conv_layer(in_channels, out_channels, kernel_size, stride=1, dilation=1,
groups=1):
padding = int((kernel_size - 1) / 2) * dilation
return nn.Conv2d(in_channels, out_channels, kernel_size, stride,
padding=padding, bias=True, dilation=dilation, groups=groups)
class ESA(nn.Module):
def __init__(self, n_feats, conv):
super(ESA, self).__init__()
f = n_feats // 4
self.conv1 = conv(n_feats, f, kernel_size=1)
self.conv_f = conv(f, f, kernel_size=1)
self.conv_max = conv(f, f, kernel_size=3, padding=1)
self.conv2 = conv(f, f, kernel_size=3, stride=2, padding=0)
self.conv3 = conv(f, f, kernel_size=3, padding=1)
self.conv3_ = conv(f, f, kernel_size=3, padding=1)
self.conv4 = conv(f, n_feats, kernel_size=1)
self.sigmoid = nn.Sigmoid()
self.relu = nn.ReLU(inplace=True)
self.skip_add = torch.nn.quantized.FloatFunctional()
def forward(self, x):
c1_ = self.conv1(x)
c1 = self.conv2(c1_)
v_max = F.max_pool2d(c1, kernel_size=7, stride=3)
v_range = self.relu(self.conv_max(v_max))
c3 = self.relu(self.conv3(v_range))
c3 = self.conv3_(c3)
c3 = F.interpolate(c3, (x.size(2), x.size(3)), mode='bilinear',
align_corners=False)
cf = self.conv_f(c1_)
c4 = self.conv4(self.skip_add.add(c3, cf))
m = self.sigmoid(c4)
return self.skip_add.mul(x, m)
class RFDBNew(nn.Module):
def __init__(self, in_channels, distillation_rate=0.25):
super(RFDBNew, self).__init__()
self.dc = self.distilled_channels = in_channels // 2
self.rc = self.remaining_channels = in_channels
self.c1_d = conv_layer(in_channels, self.dc, 1)
self.c1_r = conv_layer(in_channels, self.rc, 3)
self.c2_d = conv_layer(self.remaining_channels, self.dc, 1)
self.c2_r = conv_layer(self.remaining_channels, self.rc, 3)
self.c3_d = conv_layer(self.remaining_channels, self.dc, 1)
self.c3_r = conv_layer(self.remaining_channels, self.rc, 3)
self.c4 = conv_layer(self.remaining_channels, self.dc, 3)
self.act = activation('lrelu', neg_slope=0.05)
self.c5 = conv_layer(self.dc * 4, in_channels, 1)
self.esa = ESA(in_channels, nn.Conv2d)
self.skip_add = torch.nn.quantized.FloatFunctional()
def forward(self, input_0):
primals_1 = self.c1_d.weight
primals_2 = self.c1_d.bias
primals_4 = self.c1_r.weight
primals_5 = self.c1_r.bias
primals_6 = self.c2_d.weight
primals_7 = self.c2_d.bias
primals_8 = self.c2_r.weight
primals_9 = self.c2_r.bias
primals_10 = self.c3_d.weight
primals_11 = self.c3_d.bias
primals_12 = self.c3_r.weight
primals_13 = self.c3_r.bias
primals_14 = self.c4.weight
primals_15 = self.c4.bias
primals_16 = self.c5.weight
primals_17 = self.c5.bias
primals_18 = self.esa.conv1.weight
primals_19 = self.esa.conv1.bias
primals_28 = self.esa.conv_f.weight
primals_21 = self.esa.conv_f.bias
primals_20 = self.esa.conv_max.weight
primals_23 = self.esa.conv_max.bias
primals_22 = self.esa.conv2.weight
primals_25 = self.esa.conv2.bias
primals_24 = self.esa.conv3.weight
primals_27 = self.esa.conv3.bias
primals_26 = self.esa.conv3_.weight
primals_29 = self.esa.conv3_.bias
primals_30 = self.esa.conv4.weight
primals_31 = self.esa.conv4.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31])
return output[0]
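# --- Hedged parity sketch (editor addition, not from the source repo) ---
# RFDBNew keeps the same submodule names and state_dict layout as the eager
# RFDB it was generated from, so one sanity check is to copy parameters across
# and compare outputs on CUDA. The `eager_block` argument is assumed to be an
# RFDB(in_channels=4) instance from the source listing; the comparison is
# returned rather than asserted, since only the original trace can confirm the
# exact parameter-to-primal mapping used in forward() above.
def _rfdb_parity_check(eager_block):
    if not torch.cuda.is_available():
        return None
    compiled = RFDBNew(in_channels=4).cuda()
    compiled.load_state_dict(eager_block.state_dict())
    x = torch.rand(4, 4, 64, 64, device='cuda')
    return torch.allclose(compiled(x), eager_block.cuda()(x), atol=1e-4)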
|
BigKingXXL/RFDN
|
RFDB
| false
| 8,923
|
[
"MIT"
] | 0
|
35efe7db2558ca063206f3b5ab8341ba9c5e2dc8
|
https://github.com/BigKingXXL/RFDN/tree/35efe7db2558ca063206f3b5ab8341ba9c5e2dc8
|
GELU
|
import torch
import torch.nn as nn
class GELU(nn.Module):
def __init__(self):
super(GELU, self).__init__()
def forward(self, x):
return torch.sigmoid(1.702 * x) * x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
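# --- Hedged numeric sketch (editor addition, not from the source repo) ---
# The module above uses the sigmoid approximation sigmoid(1.702 * x) * x
# rather than the exact erf-based GELU. A quick way to see how close the two
# are over a typical activation range (the gap is small but nonzero):
def _gelu_approx_error():
    x = torch.linspace(-5.0, 5.0, steps=1001)
    approx = GELU()(x)
    exact = torch.nn.functional.gelu(x)
    return float((approx - exact).abs().max())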
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.702
tmp2 = tmp0 * tmp1
tmp3 = tl.sigmoid(tmp2)
tmp4 = tmp3 * tmp0
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0[grid(256)](arg0_1, buf0, 256, XBLOCK
=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class GELUNew(nn.Module):
def __init__(self):
super(GELUNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
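# --- Hedged parity sketch (editor addition, not from the source repo) ---
# GELUNew evaluates the same sigmoid(1.702 * x) * x expression, just through
# the fused Triton kernel above. Assuming a CUDA device, it should match an
# eager evaluation of that expression elementwise:
def _gelu_parity_check():
    if not torch.cuda.is_available():
        return None
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out = GELUNew()(x)
    expected = torch.sigmoid(1.702 * x) * x
    assert torch.allclose(out, expected, atol=1e-6)
    return out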
|
ChurchChen/SparsityRegularization
|
GELU
| false
| 8,924
|
[
"Apache-2.0"
] | 0
|
5c2e050ffe511cf4307a0bcd98360d28b7db8fef
|
https://github.com/ChurchChen/SparsityRegularization/tree/5c2e050ffe511cf4307a0bcd98360d28b7db8fef
|
RFDBsmall
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def activation(act_type, inplace=True, neg_slope=0.05, n_prelu=1):
act_type = act_type.lower()
if act_type == 'relu':
layer = nn.ReLU(inplace)
elif act_type == 'lrelu':
layer = nn.LeakyReLU(neg_slope, False)
elif act_type == 'prelu':
layer = nn.PReLU(num_parameters=n_prelu, init=neg_slope)
else:
raise NotImplementedError('activation layer [{:s}] is not found'.
format(act_type))
return layer
def conv_layer(in_channels, out_channels, kernel_size, stride=1, dilation=1,
groups=1):
padding = int((kernel_size - 1) / 2) * dilation
return nn.Conv2d(in_channels, out_channels, kernel_size, stride,
padding=padding, bias=True, dilation=dilation, groups=groups)
class ESA(nn.Module):
def __init__(self, n_feats, conv):
super(ESA, self).__init__()
f = n_feats // 4
self.conv1 = conv(n_feats, f, kernel_size=1)
self.conv_f = conv(f, f, kernel_size=1)
self.conv_max = conv(f, f, kernel_size=3, padding=1)
self.conv2 = conv(f, f, kernel_size=3, stride=2, padding=0)
self.conv3 = conv(f, f, kernel_size=3, padding=1)
self.conv3_ = conv(f, f, kernel_size=3, padding=1)
self.conv4 = conv(f, n_feats, kernel_size=1)
self.sigmoid = nn.Sigmoid()
self.relu = nn.ReLU(inplace=True)
self.skip_add = torch.nn.quantized.FloatFunctional()
def forward(self, x):
c1_ = self.conv1(x)
c1 = self.conv2(c1_)
v_max = F.max_pool2d(c1, kernel_size=7, stride=3)
v_range = self.relu(self.conv_max(v_max))
c3 = self.relu(self.conv3(v_range))
c3 = self.conv3_(c3)
c3 = F.interpolate(c3, (x.size(2), x.size(3)), mode='bilinear',
align_corners=False)
cf = self.conv_f(c1_)
c4 = self.conv4(self.skip_add.add(c3, cf))
m = self.sigmoid(c4)
return self.skip_add.mul(x, m)
class RFDBsmall(nn.Module):
def __init__(self, in_channels, distillation_rate=0.25):
super(RFDBsmall, self).__init__()
self.dc = self.distilled_channels = in_channels // 2
self.rc = self.remaining_channels = in_channels
self.c1_d = conv_layer(in_channels, self.dc, 1)
self.c1_r = conv_layer(in_channels, self.rc, 3)
self.c2_d = conv_layer(self.remaining_channels, self.dc, 1)
self.c2_r = conv_layer(self.remaining_channels, self.rc, 3)
self.c3 = conv_layer(self.remaining_channels, self.dc, 3)
self.act = activation('lrelu', neg_slope=0.05)
self.c4 = conv_layer(self.dc * 3, in_channels, 1)
self.esa = ESA(in_channels, nn.Conv2d)
self.skip_add = torch.nn.quantized.FloatFunctional()
def forward(self, input):
distilled_c1 = self.act(self.c1_d(input))
r_c1 = self.c1_r(input)
r_c1 = self.act(self.skip_add.add(r_c1, input))
"""
----> conv ->
+ -> act --> r_c<n> -> conv -> act -> distilled_c<n+1>
------------>
"""
distilled_c2 = self.act(self.c2_d(r_c1))
r_c2 = self.c2_r(r_c1)
r_c2 = self.act(self.skip_add.add(r_c2, r_c1))
r_c3 = self.act(self.c3(r_c2))
out = torch.cat([distilled_c1, distilled_c2, r_c3], dim=1)
out_fused = self.esa(self.c4(out))
return out_fused
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [[], {'in_channels': 4}]
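# --- Hedged usage sketch (editor addition, not from the source repo) ---
# RFDBsmall follows the same distill-and-refine pattern as RFDB but with one
# fewer distillation stage, so c4 fuses 3*dc channels. Like RFDB it preserves
# the input shape:
def _rfdbsmall_example():
    block = RFDBsmall(in_channels=4)
    x = torch.rand(4, 4, 64, 64)
    y = block(x)
    assert y.shape == x.shape
    return y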
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 2
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 4
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x3, None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 > tmp5
tmp7 = 0.05
tmp8 = tmp4 * tmp7
tmp9 = tl.where(tmp6, tmp4, tmp8)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp9, None)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 6
x0 = xindex % 4096
x2 = xindex // 24576
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 8192 * x2), tmp4, other=0.0).to(
tl.int1)
tmp6 = tl.load(in_ptr1 + (x0 + 4096 * x1 + 8192 * x2), tmp4, other=0.0)
tmp7 = tl.load(in_ptr2 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.05
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tl.full([1], 4, tl.int64)
tmp16 = tmp0 < tmp15
tmp17 = tmp14 & tmp16
tmp18 = tl.load(in_ptr3 + (x0 + 4096 * (-2 + x1) + 8192 * x2), tmp17,
other=0.0).to(tl.int1)
tmp19 = tl.load(in_ptr4 + (x0 + 4096 * (-2 + x1) + 8192 * x2), tmp17,
other=0.0)
tmp20 = tl.load(in_ptr5 + (-2 + x1), tmp17, eviction_policy=
'evict_last', other=0.0)
tmp21 = tmp19 + tmp20
tmp22 = tmp21 * tmp9
tmp23 = tl.where(tmp18, tmp21, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp17, tmp23, tmp24)
tmp26 = tmp0 >= tmp15
tl.full([1], 6, tl.int64)
tmp29 = tl.load(in_ptr6 + (x0 + 4096 * (-4 + x1) + 8192 * x2), tmp26,
other=0.0).to(tl.int1)
tmp30 = tl.load(in_ptr7 + (x0 + 4096 * (-4 + x1) + 8192 * x2), tmp26,
other=0.0)
tmp31 = tl.load(in_ptr8 + (-4 + x1), tmp26, eviction_policy=
'evict_last', other=0.0)
tmp32 = tmp30 + tmp31
tmp33 = tmp32 * tmp9
tmp34 = tl.where(tmp29, tmp32, tmp33)
tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype)
tmp36 = tl.where(tmp26, tmp34, tmp35)
tmp37 = tl.where(tmp17, tmp25, tmp36)
tmp38 = tl.where(tmp4, tmp13, tmp37)
tl.store(out_ptr0 + x3, tmp38, None)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 4
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, None)
@triton.jit
def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 3844
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 324
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tl.store(in_out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused__to_copy_7(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.140625
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tl.store(out_ptr0 + x0, tmp9, xmask)
@triton.jit
def triton_poi_fused_add_clamp_8(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.140625
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 8, tl.int64)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_9(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.140625
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 - tmp10
tmp12 = triton_helpers.maximum(tmp11, tmp7)
tmp13 = 1.0
tmp14 = triton_helpers.minimum(tmp12, tmp13)
tl.store(out_ptr0 + x0, tmp14, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_sub_10(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7,
in_ptr8, in_ptr9, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64 % 64
x0 = xindex % 64
x2 = xindex // 4096
x3 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + 0)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp13 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last')
tmp38 = tl.load(in_ptr8 + x3, None)
tmp39 = tl.load(in_ptr9 + 0)
tmp40 = tl.broadcast_to(tmp39, [XBLOCK])
tmp1 = tl.full([XBLOCK], 9, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 9 * tmp4 + 81 * x2), None,
eviction_policy='evict_last')
tmp12 = tmp9 + tmp11
tmp14 = tmp13 + tmp1
tmp15 = tmp13 < 0
tmp16 = tl.where(tmp15, tmp14, tmp13)
tmp17 = tl.load(in_ptr2 + (tmp16 + 9 * tmp4 + 81 * x2), None,
eviction_policy='evict_last')
tmp18 = tmp17 + tmp11
tmp19 = tmp18 - tmp12
tmp21 = tmp19 * tmp20
tmp22 = tmp12 + tmp21
tmp24 = tmp23 + tmp1
tmp25 = tmp23 < 0
tmp26 = tl.where(tmp25, tmp24, tmp23)
tmp27 = tl.load(in_ptr2 + (tmp8 + 9 * tmp26 + 81 * x2), None,
eviction_policy='evict_last')
tmp28 = tmp27 + tmp11
tmp29 = tl.load(in_ptr2 + (tmp16 + 9 * tmp26 + 81 * x2), None,
eviction_policy='evict_last')
tmp30 = tmp29 + tmp11
tmp31 = tmp30 - tmp28
tmp32 = tmp31 * tmp20
tmp33 = tmp28 + tmp32
tmp34 = tmp33 - tmp22
tmp36 = tmp34 * tmp35
tmp37 = tmp22 + tmp36
tmp41 = tmp38 + tmp40
tmp42 = tmp37 + tmp41
tl.store(in_out_ptr0 + x3, tmp42, None)
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_11(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 4
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, None)
tmp2 = tmp0 + tmp1
tmp4 = tl.sigmoid(tmp2)
tmp5 = tmp3 * tmp4
tl.store(in_out_ptr0 + x3, tmp2, None)
tl.store(out_ptr0 + x3, tmp5, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27) = args
args.clear()
assert_size_stride(primals_1, (2, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (2,), (1,))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (2, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (2,), (1,))
assert_size_stride(primals_8, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (2, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_11, (2,), (1,))
assert_size_stride(primals_12, (4, 6, 1, 1), (6, 1, 1, 1))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_15, (1,), (1,))
assert_size_stride(primals_16, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_17, (1,), (1,))
assert_size_stride(primals_18, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_19, (1,), (1,))
assert_size_stride(primals_20, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_21, (1,), (1,))
assert_size_stride(primals_22, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_23, (1,), (1,))
assert_size_stride(primals_24, (1, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_25, (1,), (1,))
assert_size_stride(primals_26, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_27, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 2, 64, 64), (8192, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 2, 64, 64), (8192, 4096, 64, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(32768)](buf0,
primals_2, buf1, 32768, XBLOCK=128, num_warps=4, num_stages=1)
buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf3 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1),
torch.bool)
buf4 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1),
torch.float32)
triton_poi_fused_add_convolution_leaky_relu_1[grid(65536)](buf2,
primals_5, primals_3, buf3, buf4, 65536, XBLOCK=512, num_warps=
4, num_stages=1)
del primals_5
buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 2, 64, 64), (8192, 4096, 64, 1))
buf6 = empty_strided_cuda((4, 2, 64, 64), (8192, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_0[grid(32768)](buf5,
primals_7, buf6, 32768, XBLOCK=128, num_warps=4, num_stages=1)
buf7 = extern_kernels.convolution(buf4, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf8 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1),
torch.bool)
buf9 = buf2
del buf2
triton_poi_fused_add_convolution_leaky_relu_1[grid(65536)](buf7,
primals_9, buf4, buf8, buf9, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del primals_9
buf10 = extern_kernels.convolution(buf9, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 2, 64, 64), (8192, 4096, 64, 1))
buf11 = empty_strided_cuda((4, 2, 64, 64), (8192, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_0[grid(32768)](buf10,
primals_11, buf11, 32768, XBLOCK=128, num_warps=4, num_stages=1)
buf12 = empty_strided_cuda((4, 6, 64, 64), (24576, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_2[grid(98304)](buf1, buf0, primals_2, buf6,
buf5, primals_7, buf11, buf10, primals_11, buf12, 98304, XBLOCK
=512, num_warps=8, num_stages=1)
del buf0
del buf10
del buf5
del primals_11
del primals_2
del primals_7
buf13 = extern_kernels.convolution(buf12, primals_12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf14 = buf13
del buf13
triton_poi_fused_convolution_3[grid(65536)](buf14, primals_13,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_13
buf15 = extern_kernels.convolution(buf14, primals_14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf16 = buf15
del buf15
triton_poi_fused_convolution_4[grid(16384)](buf16, primals_15,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_15
buf17 = extern_kernels.convolution(buf16, primals_16, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 1, 31, 31), (961, 961, 31, 1))
buf18 = buf17
del buf17
triton_poi_fused_convolution_5[grid(3844)](buf18, primals_17, 3844,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_17
buf19 = torch.ops.aten.max_pool2d_with_indices.default(buf18, [7, 7
], [3, 3])
buf20 = buf19[0]
buf21 = buf19[1]
del buf19
buf22 = extern_kernels.convolution(buf20, primals_18, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 1, 9, 9), (81, 81, 9, 1))
buf23 = buf22
del buf22
triton_poi_fused_convolution_relu_6[grid(324)](buf23, primals_19,
324, XBLOCK=128, num_warps=4, num_stages=1)
del primals_19
buf24 = extern_kernels.convolution(buf23, primals_20, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 1, 9, 9), (81, 81, 9, 1))
buf25 = buf24
del buf24
triton_poi_fused_convolution_relu_6[grid(324)](buf25, primals_21,
324, XBLOCK=128, num_warps=4, num_stages=1)
del primals_21
buf26 = extern_kernels.convolution(buf25, primals_22, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 1, 9, 9), (81, 81, 9, 1))
buf27 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_7[grid(64)](buf27, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf28 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_8[grid(64)](buf28, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf29 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused__to_copy_7[grid(64)](buf29, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf30 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused_add_clamp_8[grid(64)](buf30, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf31 = empty_strided_cuda((64,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_9[grid(64)](buf31,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf33 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_9[grid(64)](buf33,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf35 = extern_kernels.convolution(buf16, primals_24, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf35, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf34 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1),
torch.float32)
buf36 = reinterpret_tensor(buf34, (4, 1, 64, 64), (4096, 4096, 64,
1), 0)
del buf34
triton_poi_fused__unsafe_index_add_convolution_mul_sub_10[grid(16384)](
buf36, buf27, buf29, buf26, primals_23, buf30, buf31, buf28,
buf33, buf35, primals_25, 16384, XBLOCK=256, num_warps=4,
num_stages=1)
del buf26
del buf35
del primals_23
del primals_25
buf37 = extern_kernels.convolution(buf36, primals_26, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf37, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf38 = buf37
del buf37
buf39 = buf7
del buf7
triton_poi_fused_convolution_mul_sigmoid_11[grid(65536)](buf38,
primals_27, buf14, buf39, 65536, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_27
return (buf39, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, primals_16, primals_18,
primals_20, primals_22, primals_24, primals_26, buf1, buf3, buf4,
buf6, buf8, buf9, buf11, buf12, buf14, buf16, buf18, buf20, buf21,
buf23, buf25, buf27, buf28, buf29, buf30, buf31, buf33, buf36, buf38)
def activation(act_type, inplace=True, neg_slope=0.05, n_prelu=1):
act_type = act_type.lower()
if act_type == 'relu':
layer = nn.ReLU(inplace)
elif act_type == 'lrelu':
layer = nn.LeakyReLU(neg_slope, False)
elif act_type == 'prelu':
layer = nn.PReLU(num_parameters=n_prelu, init=neg_slope)
else:
raise NotImplementedError('activation layer [{:s}] is not found'.
format(act_type))
return layer
def conv_layer(in_channels, out_channels, kernel_size, stride=1, dilation=1,
groups=1):
padding = int((kernel_size - 1) / 2) * dilation
return nn.Conv2d(in_channels, out_channels, kernel_size, stride,
padding=padding, bias=True, dilation=dilation, groups=groups)
class ESA(nn.Module):
def __init__(self, n_feats, conv):
super(ESA, self).__init__()
f = n_feats // 4
self.conv1 = conv(n_feats, f, kernel_size=1)
self.conv_f = conv(f, f, kernel_size=1)
self.conv_max = conv(f, f, kernel_size=3, padding=1)
self.conv2 = conv(f, f, kernel_size=3, stride=2, padding=0)
self.conv3 = conv(f, f, kernel_size=3, padding=1)
self.conv3_ = conv(f, f, kernel_size=3, padding=1)
self.conv4 = conv(f, n_feats, kernel_size=1)
self.sigmoid = nn.Sigmoid()
self.relu = nn.ReLU(inplace=True)
self.skip_add = torch.nn.quantized.FloatFunctional()
def forward(self, x):
c1_ = self.conv1(x)
c1 = self.conv2(c1_)
v_max = F.max_pool2d(c1, kernel_size=7, stride=3)
v_range = self.relu(self.conv_max(v_max))
c3 = self.relu(self.conv3(v_range))
c3 = self.conv3_(c3)
c3 = F.interpolate(c3, (x.size(2), x.size(3)), mode='bilinear',
align_corners=False)
cf = self.conv_f(c1_)
c4 = self.conv4(self.skip_add.add(c3, cf))
m = self.sigmoid(c4)
return self.skip_add.mul(x, m)
class RFDBsmallNew(nn.Module):
def __init__(self, in_channels, distillation_rate=0.25):
super(RFDBsmallNew, self).__init__()
self.dc = self.distilled_channels = in_channels // 2
self.rc = self.remaining_channels = in_channels
self.c1_d = conv_layer(in_channels, self.dc, 1)
self.c1_r = conv_layer(in_channels, self.rc, 3)
self.c2_d = conv_layer(self.remaining_channels, self.dc, 1)
self.c2_r = conv_layer(self.remaining_channels, self.rc, 3)
self.c3 = conv_layer(self.remaining_channels, self.dc, 3)
self.act = activation('lrelu', neg_slope=0.05)
self.c4 = conv_layer(self.dc * 3, in_channels, 1)
self.esa = ESA(in_channels, nn.Conv2d)
self.skip_add = torch.nn.quantized.FloatFunctional()
def forward(self, input_0):
primals_1 = self.c1_d.weight
primals_2 = self.c1_d.bias
primals_4 = self.c1_r.weight
primals_5 = self.c1_r.bias
primals_6 = self.c2_d.weight
primals_7 = self.c2_d.bias
primals_8 = self.c2_r.weight
primals_9 = self.c2_r.bias
primals_10 = self.c3.weight
primals_11 = self.c3.bias
primals_12 = self.c4.weight
primals_13 = self.c4.bias
primals_14 = self.esa.conv1.weight
primals_15 = self.esa.conv1.bias
primals_24 = self.esa.conv_f.weight
        primals_25 = self.esa.conv_f.bias
        primals_18 = self.esa.conv_max.weight
        primals_19 = self.esa.conv_max.bias
        primals_16 = self.esa.conv2.weight
        primals_17 = self.esa.conv2.bias
        primals_20 = self.esa.conv3.weight
        primals_21 = self.esa.conv3.bias
        primals_22 = self.esa.conv3_.weight
        primals_23 = self.esa.conv3_.bias
primals_26 = self.esa.conv4.weight
primals_27 = self.esa.conv4.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27])
return output[0]
|
BigKingXXL/RFDN
|
RFDBsmall
| false
| 8,925
|
[
"MIT"
] | 0
|
35efe7db2558ca063206f3b5ab8341ba9c5e2dc8
|
https://github.com/BigKingXXL/RFDN/tree/35efe7db2558ca063206f3b5ab8341ba9c5e2dc8
|
OELoss
|
import torch
import torch.nn as nn
class OELoss(nn.Module):
def __init__(self):
super(OELoss, self).__init__()
def forward(self, x):
return -(x.mean(1) - torch.logsumexp(x, dim=1)).mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_logsumexp_mean_neg_sub_0(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp3 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = triton_helpers.maximum(tmp0, tmp1)
tmp10 = triton_helpers.maximum(tmp9, tmp3)
tmp11 = triton_helpers.maximum(tmp10, tmp5)
tmp12 = tl_math.abs(tmp11)
tmp13 = float('inf')
tmp14 = tmp12 == tmp13
tmp15 = 0.0
tmp16 = tl.where(tmp14, tmp15, tmp11)
tmp17 = tmp0 - tmp16
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp1 - tmp16
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp18 + tmp20
tmp22 = tmp3 - tmp16
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp21 + tmp23
tmp25 = tmp5 - tmp16
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tmp28 = tl_math.log(tmp27)
tmp29 = tmp28 + tmp16
tmp30 = tmp8 - tmp29
tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK])
tmp33 = tl.sum(tmp31, 1)[:, None]
tmp34 = 64.0
tmp35 = tmp33 / tmp34
tmp36 = -tmp35
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp36, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_logsumexp_mean_neg_sub_0[grid(1)](buf2, arg0_1, 1,
64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf2,
class OELossNew(nn.Module):
def __init__(self):
super(OELossNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
ChurchChen/SparsityRegularization
|
OELoss
| false
| 8,926
|
[
"Apache-2.0"
] | 0
|
5c2e050ffe511cf4307a0bcd98360d28b7db8fef
|
https://github.com/ChurchChen/SparsityRegularization/tree/5c2e050ffe511cf4307a0bcd98360d28b7db8fef
|
DiceLoss
|
import torch
import torch.nn as nn
class DiceLoss(nn.Module):
def __init__(self, weight=None, size_average=True):
super(DiceLoss, self).__init__()
def forward(self, inputs, targets, smooth=1):
inputs = inputs.view(-1)
targets = targets.view(-1)
intersection = (inputs * targets).sum()
dice = (2.0 * intersection + smooth) / (inputs.sum() + targets.sum(
) + smooth)
return 1 - dice
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.broadcast_to(tmp0, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = tl.broadcast_to(tmp1, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = 2.0
tmp13 = tmp5 * tmp12
tmp14 = 1.0
tmp15 = tmp13 + tmp14
tmp16 = tmp8 + tmp11
tmp17 = tmp16 + tmp14
tmp18 = tmp15 / tmp17
tmp19 = tmp14 - tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp19, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mul_rsub_sum_0[grid(1)](buf3, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf3,
class DiceLossNew(nn.Module):
def __init__(self, weight=None, size_average=True):
super(DiceLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Charbel199/Oil-Spill-Thickness-Estimation
|
DiceLoss
| false
| 8,927
|
[
"MIT"
] | 0
|
dd600f6da611461f3b8072389bc34e6285109246
|
https://github.com/Charbel199/Oil-Spill-Thickness-Estimation/tree/dd600f6da611461f3b8072389bc34e6285109246
|
Q
|
import torch
import torch.nn as nn
class P(nn.Module):
"""
to solve min(P) = ||I-PQ||^2 + γ||P-R||^2
    This is a least-squares problem with a closed-form solution:
P* = (gamma*R + I*Q) / (Q*Q + gamma)
"""
def __init__(self):
super().__init__()
def forward(self, I, Q, R, gamma):
return (I * Q + gamma * R) / (gamma + Q * Q)
class Q(nn.Module):
"""
to solve min(Q) = ||I-PQ||^2 + λ||Q-L||^2
Q* = (lamda*L + I*P) / (P*P + lamda)
"""
def __init__(self):
super().__init__()
def forward(self, I, P, L, lamda):
IR = I[:, 0:1, :, :]
IG = I[:, 1:2, :, :]
IB = I[:, 2:3, :, :]
PR = P[:, 0:1, :, :]
PG = P[:, 1:2, :, :]
PB = P[:, 2:3, :, :]
return (IR * PR + IG * PG + IB * PB + lamda * L) / (PR * PR + PG *
PG + PB * PB + lamda)
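# Minimal numerical sketch (not part of the original module, illustrative
# names only): per pixel, Q.forward() returns the closed-form minimizer
# Q* = (sum_c I_c*P_c + lamda*L) / (sum_c P_c*P_c + lamda) of
# ||I - P*Q||^2 + lamda*||Q - L||^2, so perturbing its output should not
# reduce that objective.
def _check_q_closed_form():
    torch.manual_seed(0)
    I = torch.rand(1, 3, 8, 8)
    P_ = torch.rand(1, 3, 8, 8)
    L = torch.rand(1, 1, 8, 8)
    lamda = 0.5
    q_star = Q()(I, P_, L, lamda)
    def objective(q):
        return ((I - P_ * q) ** 2).sum() + lamda * ((q - L) ** 2).sum()
    assert objective(q_star) <= objective(q_star + 0.01)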
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp4 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp7 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp8 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tl.load(in_ptr2 + x3, xmask)
tmp5 = tl.load(in_ptr3 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr3 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 * tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp5 * tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = tmp12 + tmp1
tmp14 = tmp4 / tmp13
tl.store(out_ptr0 + x3, tmp14, xmask)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(64)](arg0_1, arg1_1, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_mul_1[grid(256)](buf0, arg2_1, arg3_1,
arg1_1, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg1_1
del arg2_1
del arg3_1
del buf0
return buf1,
class P(nn.Module):
"""
to solve min(P) = ||I-PQ||^2 + γ||P-R||^2
    This is a least-squares problem with a closed-form solution:
P* = (gamma*R + I*Q) / (Q*Q + gamma)
"""
def __init__(self):
super().__init__()
def forward(self, I, Q, R, gamma):
        return (I * Q + gamma * R) / (gamma + Q * Q)
class QNew(nn.Module):
"""
to solve min(Q) = ||I-PQ||^2 + λ||Q-L||^2
Q* = (lamda*L + I*P) / (P*P + lamda)
"""
def __init__(self):
super().__init__()
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
|
AndersonYong/URetinex-Net-Retinex-based-Deep-Unfolding-Network-for-Low-light-Image-Enhancem
|
Q
| false
| 8,928
|
[
"MIT"
] | 0
|
9d837b8df9c761defb1eca390b3a60aa4a6fbb1a
|
https://github.com/AndersonYong/URetinex-Net-Retinex-based-Deep-Unfolding-Network-for-Low-light-Image-Enhancem/tree/9d837b8df9c761defb1eca390b3a60aa4a6fbb1a
|
P
|
import torch
import torch.nn as nn
class P(nn.Module):
"""
to solve min(P) = ||I-PQ||^2 + γ||P-R||^2
    This is a least-squares problem with a closed-form solution:
P* = (gamma*R + I*Q) / (Q*Q + gamma)
"""
def __init__(self):
super().__init__()
def forward(self, I, Q, R, gamma):
return (I * Q + gamma * R) / (gamma + Q * Q)
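# Minimal numerical sketch (illustrative only, not part of the original
# module): elementwise, P.forward() returns the closed-form minimizer
# P* = (I*Q + gamma*R) / (Q*Q + gamma) of ||I - P*Q||^2 + gamma*||P - R||^2,
# so nudging its output should not reduce that objective.
def _check_p_closed_form():
    torch.manual_seed(0)
    I = torch.rand(2, 3, 8, 8)
    Q_ = torch.rand(2, 3, 8, 8)
    R = torch.rand(2, 3, 8, 8)
    gamma = 0.1
    p_star = P()(I, Q_, R, gamma)
    def objective(p):
        return ((I - p * Q_) ** 2).sum() + gamma * ((p - R) ** 2).sum()
    assert objective(p_star) <= objective(p_star + 0.01)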
class Q(nn.Module):
"""
to solve min(Q) = ||I-PQ||^2 + λ||Q-L||^2
Q* = (lamda*L + I*P) / (P*P + lamda)
"""
def __init__(self):
super().__init__()
def forward(self, I, P, L, lamda):
IR = I[:, 0:1, :, :]
IG = I[:, 1:2, :, :]
IB = I[:, 2:3, :, :]
PR = P[:, 0:1, :, :]
PG = P[:, 1:2, :, :]
PB = P[:, 2:3, :, :]
return (IR * PR + IG * PG + IB * PB + lamda * L) / (PR * PR + PG *
PG + PB * PB + lamda)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp3 = tl.load(in_ptr2 + x0, xmask)
tmp4 = tl.load(in_ptr3 + x0, xmask)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp7 = tmp1 * tmp1
tmp8 = tmp3 + tmp7
tmp9 = tmp6 / tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mul_0[grid(256)](arg0_1, arg1_1, arg2_1,
arg3_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return buf0,
class PNew(nn.Module):
"""
to solve min(P) = ||I-PQ||^2 + γ||P-R||^2
    This is a least-squares problem with a closed-form solution:
P* = (gamma*R + I*Q) / (Q*Q + gamma)
"""
def __init__(self):
super().__init__()
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
class Q(nn.Module):
"""
to solve min(Q) = ||I-PQ||^2 + λ||Q-L||^2
Q* = (lamda*L + I*P) / (P*P + lamda)
"""
def __init__(self):
super().__init__()
def forward(self, I, P, L, lamda):
IR = I[:, 0:1, :, :]
IG = I[:, 1:2, :, :]
IB = I[:, 2:3, :, :]
        PR = P[:, 0:1, :, :]
        PG = P[:, 1:2, :, :]
        PB = P[:, 2:3, :, :]
return (IR * PR + IG * PG + IB * PB + lamda * L) / (PR * PR + PG *
PG + PB * PB + lamda)
|
AndersonYong/URetinex-Net-Retinex-based-Deep-Unfolding-Network-for-Low-light-Image-Enhancem
|
P
| false
| 8,929
|
[
"MIT"
] | 0
|
9d837b8df9c761defb1eca390b3a60aa4a6fbb1a
|
https://github.com/AndersonYong/URetinex-Net-Retinex-based-Deep-Unfolding-Network-for-Low-light-Image-Enhancem/tree/9d837b8df9c761defb1eca390b3a60aa4a6fbb1a
|
get_loss
|
import torch
import torch.nn as nn
class get_loss(nn.Module):
def __init__(self):
super(get_loss, self).__init__()
def forward(self, pred, target):
weight = target + 1
loss = nn.BCELoss(weight=weight)(pred, target)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_binary_cross_entropy_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp0 - tmp1
tmp4 = -tmp3
tmp5 = libdevice.log1p(tmp4)
tmp6 = -100.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp2 * tmp7
tmp9 = tl_math.log(tmp3)
tmp10 = triton_helpers.maximum(tmp9, tmp6)
tmp11 = tmp0 * tmp10
tmp12 = tmp8 - tmp11
tmp13 = tmp0 + tmp1
tmp14 = tmp12 * tmp13
tmp15 = tl.broadcast_to(tmp14, [RBLOCK])
tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0))
tmp18 = 256.0
tmp19 = tmp17 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp19, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_binary_cross_entropy_0[grid(1)](buf1, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class get_lossNew(nn.Module):
def __init__(self):
super(get_lossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
ChunhuiChen97/RetinalVesselSegmentation
|
get_loss
| false
| 8,930
|
[
"MIT"
] | 0
|
d291e23b1ad9814070897ef850d0117d67331d70
|
https://github.com/ChunhuiChen97/RetinalVesselSegmentation/tree/d291e23b1ad9814070897ef850d0117d67331d70
|
SSWELoss
|
import torch
import torch.nn as nn
class HingeMarginLoss(nn.Module):
"""
    Interface for computing the hinge loss.
"""
def __init__(self):
super(HingeMarginLoss, self).__init__()
def forward(self, t, tr, delt=None, size_average=False):
"""
        Compute the hinge loss.
"""
if delt is None:
loss = torch.clamp(1 - t + tr, min=0)
else:
loss = torch.clamp(1 - torch.mul(t - tr, torch.squeeze(delt)),
min=0)
loss = torch.unsqueeze(loss, dim=-1)
return loss
class SSWELoss(nn.Module):
"""
    Loss class for SSWE that combines the n-gram (syntactic) score and the sentiment score.
"""
def __init__(self, alpha=0.5):
super(SSWELoss, self).__init__()
self.alpha = alpha
self.hingeLoss = HingeMarginLoss()
def forward(self, scores, labels, size_average=False):
"""
        scores: [(true_syn_score, true_sem_score), (corrupt_syn_score, corrupt_sem_score), ...]
"""
assert len(scores) >= 2
true_score = scores[0]
sem_loss = self.hingeLoss(true_score[1][:, 0], true_score[1][:, 1],
delt=labels)
loss = []
for corpt_i in range(1, len(scores)):
syn_loss = self.hingeLoss(true_score[0], scores[corpt_i][0])
cur_loss = syn_loss * self.alpha + sem_loss * (1 - self.alpha)
loss.append(cur_loss)
loss = torch.cat(loss, dim=0)
if size_average:
loss = torch.mean(loss)
return loss
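# Minimal usage sketch (illustrative shapes, not part of the original file):
# `scores` holds one (syntactic_score, sentiment_scores) pair for the true
# n-gram followed by one pair per corrupted n-gram; sentiment_scores is a
# (batch, 2) tensor whose two columns are compared with a hinge margin
# weighted by `labels`.
def _sswe_loss_example():
    batch = 8
    true_pair = (torch.rand(batch), torch.rand(batch, 2))
    corrupt_pair = (torch.rand(batch), torch.rand(batch, 2))
    labels = torch.ones(batch)
    criterion = SSWELoss(alpha=0.5)
    return criterion([true_pair, corrupt_pair], labels, size_average=True)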
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 3072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 256
x5 = xindex % 16
x1 = xindex // 4 % 4
x6 = xindex // 4 % 64
x7 = xindex
tmp0 = x3
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x5, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = 1.0
tmp7 = tmp6 - tmp5
tmp8 = tl.load(in_ptr0 + (64 + x5), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = 0.0
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp12 = 0.5
tmp13 = tmp11 * tmp12
tmp14 = tl.load(in_ptr0 + (16 + 4 * x1), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tl.load(in_ptr0 + (17 + 4 * x1), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp16 = tmp14 - tmp15
tmp17 = tl.load(in_ptr1 + (x6 + 64 * x3), tmp4 & xmask, eviction_policy
='evict_last', other=0.0)
tmp18 = tmp16 * tmp17
tmp19 = tmp6 - tmp18
tmp20 = triton_helpers.maximum(tmp19, tmp10)
tmp21 = tmp20 * tmp12
tmp22 = tmp13 + tmp21
tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype)
tmp24 = tl.where(tmp4, tmp22, tmp23)
tmp25 = tmp0 >= tmp3
tmp26 = tl.full([1], 8, tl.int64)
tmp27 = tmp0 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tl.load(in_ptr0 + x5, tmp28 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp30 = tmp6 - tmp29
tmp31 = tl.load(in_ptr0 + (128 + x5), tmp28 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp32 = tmp30 + tmp31
tmp33 = triton_helpers.maximum(tmp32, tmp10)
tmp34 = tmp33 * tmp12
tmp35 = tl.load(in_ptr0 + (16 + 4 * x1), tmp28 & xmask, eviction_policy
='evict_last', other=0.0)
tmp36 = tl.load(in_ptr0 + (17 + 4 * x1), tmp28 & xmask, eviction_policy
='evict_last', other=0.0)
tmp37 = tmp35 - tmp36
tmp38 = tl.load(in_ptr1 + (x6 + 64 * (-4 + x3)), tmp28 & xmask,
eviction_policy='evict_last', other=0.0)
tmp39 = tmp37 * tmp38
tmp40 = tmp6 - tmp39
tmp41 = triton_helpers.maximum(tmp40, tmp10)
tmp42 = tmp41 * tmp12
tmp43 = tmp34 + tmp42
tmp44 = tl.full(tmp43.shape, 0.0, tmp43.dtype)
tmp45 = tl.where(tmp28, tmp43, tmp44)
tmp46 = tmp0 >= tmp26
tl.full([1], 12, tl.int64)
tmp49 = tl.load(in_ptr0 + x5, tmp46 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp50 = tmp6 - tmp49
tmp51 = tl.load(in_ptr0 + (192 + x5), tmp46 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp52 = tmp50 + tmp51
tmp53 = triton_helpers.maximum(tmp52, tmp10)
tmp54 = tmp53 * tmp12
tmp55 = tl.load(in_ptr0 + (16 + 4 * x1), tmp46 & xmask, eviction_policy
='evict_last', other=0.0)
tmp56 = tl.load(in_ptr0 + (17 + 4 * x1), tmp46 & xmask, eviction_policy
='evict_last', other=0.0)
tmp57 = tmp55 - tmp56
tmp58 = tl.load(in_ptr1 + (x6 + 64 * (-8 + x3)), tmp46 & xmask,
eviction_policy='evict_last', other=0.0)
tmp59 = tmp57 * tmp58
tmp60 = tmp6 - tmp59
tmp61 = triton_helpers.maximum(tmp60, tmp10)
tmp62 = tmp61 * tmp12
tmp63 = tmp54 + tmp62
tmp64 = tl.full(tmp63.shape, 0.0, tmp63.dtype)
tmp65 = tl.where(tmp46, tmp63, tmp64)
tmp66 = tl.where(tmp28, tmp45, tmp65)
tmp67 = tl.where(tmp4, tmp24, tmp66)
tl.store(out_ptr0 + x7, tmp67, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((12, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(3072)](arg0_1, arg1_1, buf0, 3072,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class HingeMarginLoss(nn.Module):
"""
    Interface for computing the hinge loss.
"""
def __init__(self):
super(HingeMarginLoss, self).__init__()
def forward(self, t, tr, delt=None, size_average=False):
"""
        Compute the hinge loss.
"""
if delt is None:
loss = torch.clamp(1 - t + tr, min=0)
else:
loss = torch.clamp(1 - torch.mul(t - tr, torch.squeeze(delt)),
min=0)
loss = torch.unsqueeze(loss, dim=-1)
return loss
class SSWELossNew(nn.Module):
"""
    Loss class for SSWE that combines the n-gram (syntactic) score and the sentiment score.
"""
def __init__(self, alpha=0.5):
super(SSWELossNew, self).__init__()
self.alpha = alpha
self.hingeLoss = HingeMarginLoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Cuiqingyao/multilabel
|
SSWELoss
| false
| 8,931
|
[
"Apache-2.0"
] | 0
|
f36dc6f1168a3edf8f43565477c096dc0bf31de8
|
https://github.com/Cuiqingyao/multilabel/tree/f36dc6f1168a3edf8f43565477c096dc0bf31de8
|
Pooler
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.linear import Linear
import torch.nn.init as init
from torch.nn import Parameter
from torch.nn.parameter import Parameter
class Pooler(nn.Module):
"""Pooler layer.
    Pool hidden states of a specific token (for example, the start of the
    sequence) and add a linear transformation followed by a tanh.
Arguments:
hidden_size: hidden size
init_method: weight initialization method for the linear layer.
bias is set to zero.
"""
def __init__(self, hidden_size):
super(Pooler, self).__init__()
self.dense = Linear(hidden_size, hidden_size)
def forward(self, hidden_states, sequence_index=0):
pooled = hidden_states[:, sequence_index, :]
pooled = self.dense(pooled)
pooled = torch.tanh(pooled)
return pooled
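# Minimal usage sketch (assumed [batch, seq_len, hidden] input, not part of
# the original file): the pooler indexes dim 1 at `sequence_index`, then
# applies the dense layer and a tanh.
def _pooler_example():
    hidden_states = torch.rand(2, 16, 4)
    pooler = Pooler(hidden_size=4)
    pooled = pooler(hidden_states, sequence_index=0)  # shape [2, 4]
    return pooled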
class Linear(nn.Module):
"""Linear layer with column parallelism.
The linear layer is defined as Y = XA + b. A is parallelized along
its second dimension as A = [A_1, ..., A_p].
Arguments:
input_size: first dimension of matrix A.
output_size: second dimension of matrix A.
bias: If true, add bias
init_method: method to initialize weights. Note that bias is always set
to zero.
stride: For the strided linear layers.
keep_master_weight_for_test: This was added for testing and should be
set to False. It returns the master weights
used for initialization.
        skip_bias_add: This was added to enable performance optimizations where bias
                       can be fused with other elementwise operations. We skip
                       adding bias but instead return it.
"""
def __init__(self, input_size, output_size, bias=True, skip_bias_add=False
):
super(Linear, self).__init__()
self.input_size = input_size
self.output_size = output_size
self.skip_bias_add = skip_bias_add
self.weight = Parameter(torch.empty(self.output_size, self.input_size))
init.normal_(self.weight)
if bias:
self.bias = Parameter(torch.empty(self.output_size))
with torch.no_grad():
self.bias.zero_()
else:
self.register_parameter('bias', None)
def forward(self, input_):
bias = self.bias if not self.skip_bias_add else None
output = F.linear(input_, self.weight, bias)
if self.skip_bias_add:
return output, self.bias
else:
return output
def __repr__(self):
return (
f'Linear(in_features={self.input_size}, out_features={self.output_size}, '
+
f'bias={self.bias is not None}, skip_bias_add={self.skip_bias_add})'
)
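# Minimal sketch (illustrative only, not part of the original file) of the two
# behaviours described in the docstring: with skip_bias_add=True the layer
# returns (output, bias) so the caller can fuse the bias-add elsewhere, and
# splitting the weight along its output dimension into column blocks
# [A_1, ..., A_p] and concatenating the partial results reproduces Y = XA + b.
def _linear_example():
    torch.manual_seed(0)
    layer = Linear(4, 6, skip_bias_add=True)
    x = torch.rand(2, 4)
    out, bias = layer(x)            # bias is returned, not applied
    y = out + bias                  # caller applies (or fuses) the bias itself
    # Column-parallel view: each shard holds 3 of the 6 output features.
    w1, w2 = layer.weight[:3], layer.weight[3:]
    y_sharded = torch.cat([x @ w1.t(), x @ w2.t()], dim=-1) + bias
    assert torch.allclose(y, y_sharded, atol=1e-6)
    return y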
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.linear import Linear
import torch.nn.init as init
from torch.nn import Parameter
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
triton_poi_fused_add_tanh_1[grid(64)](buf2, primals_2, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del primals_2
return buf2, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf2
class PoolerNew(nn.Module):
"""Pooler layer.
    Pool hidden states of a specific token (for example, the start of the
    sequence) and add a linear transformation followed by a tanh.
Arguments:
hidden_size: hidden size
init_method: weight initialization method for the linear layer.
bias is set to zero.
"""
def __init__(self, hidden_size):
super(PoolerNew, self).__init__()
self.dense = Linear(hidden_size, hidden_size)
def forward(self, input_0):
primals_3 = self.dense.weight
primals_2 = self.dense.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
class Linear(nn.Module):
"""Linear layer with column parallelism.
The linear layer is defined as Y = XA + b. A is parallelized along
its second dimension as A = [A_1, ..., A_p].
Arguments:
input_size: first dimension of matrix A.
output_size: second dimension of matrix A.
bias: If true, add bias
init_method: method to initialize weights. Note that bias is always set
to zero.
stride: For the strided linear layers.
keep_master_weight_for_test: This was added for testing and should be
set to False. It returns the master weights
used for initialization.
        skip_bias_add: This was added to enable performance optimizations where bias
                       can be fused with other elementwise operations. We skip
                       adding bias but instead return it.
"""
def __init__(self, input_size, output_size, bias=True, skip_bias_add=False
):
super(Linear, self).__init__()
self.input_size = input_size
self.output_size = output_size
self.skip_bias_add = skip_bias_add
self.weight = Parameter(torch.empty(self.output_size, self.input_size))
init.normal_(self.weight)
if bias:
self.bias = Parameter(torch.empty(self.output_size))
with torch.no_grad():
self.bias.zero_()
else:
self.register_parameter('bias', None)
def forward(self, input_):
bias = self.bias if not self.skip_bias_add else None
output = F.linear(input_, self.weight, bias)
if self.skip_bias_add:
return output, self.bias
else:
return output
def __repr__(self):
return (
f'Linear(in_features={self.input_size}, out_features={self.output_size}, '
+
f'bias={self.bias is not None}, skip_bias_add={self.skip_bias_add})'
)
|
BoxiangW/ColossalAI-Examples
|
Pooler
| false
| 8,932
|
[
"Apache-2.0"
] | 0
|
853fefe709508839a56df0cfe1a548e02254724a
|
https://github.com/BoxiangW/ColossalAI-Examples/tree/853fefe709508839a56df0cfe1a548e02254724a
|
GELU_
|
import math
import torch
import torch.nn as nn
class GELU_(nn.Module):
def forward(self, x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x +
0.044715 * torch.pow(x, 3))))
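# Quick numerical sketch (not part of the original file): compare this tanh
# approximation against the exact erf-based GELU, 0.5*x*(1 + erf(x/sqrt(2)));
# over a moderate range the maximum difference is small (on the order of
# 1e-3 or less).
def _compare_with_exact_gelu():
    x = torch.linspace(-4.0, 4.0, steps=101)
    approx = GELU_()(x)
    exact = 0.5 * x * (1 + torch.erf(x / math.sqrt(2)))
    return (approx - exact).abs().max()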
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_pow_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
tmp9 = tmp7 * tmp8
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_pow_tanh_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class GELU_New(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
AniketRajpoot/reformer-pytorch
|
GELU_
| false
| 8,933
|
[
"MIT"
] | 0
|
06b131eb383e7a3a184b7038ef20fe614958216f
|
https://github.com/AniketRajpoot/reformer-pytorch/tree/06b131eb383e7a3a184b7038ef20fe614958216f
|
MultiHeadAttention
|
import torch
import torch.nn as nn
class MultiHeadAttention(nn.Module):
def __init__(self, hidden_size, attention_dropout_rate, num_heads):
super(MultiHeadAttention, self).__init__()
self.num_heads = num_heads
self.att_size = att_size = hidden_size // num_heads
self.scale = att_size ** -0.5
self.linear_q = nn.Linear(hidden_size, num_heads * att_size)
self.linear_k = nn.Linear(hidden_size, num_heads * att_size)
self.linear_v = nn.Linear(hidden_size, num_heads * att_size)
self.att_dropout = nn.Dropout(attention_dropout_rate)
self.output_layer = nn.Linear(num_heads * att_size, hidden_size)
def forward(self, q, k, v, attn_bias=None):
orig_q_size = q.size()
d_k = self.att_size
d_v = self.att_size
batch_size = q.size(0)
q = self.linear_q(q).view(batch_size, -1, self.num_heads, d_k)
k = self.linear_k(k).view(batch_size, -1, self.num_heads, d_k)
v = self.linear_v(v).view(batch_size, -1, self.num_heads, d_v)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
k = k.transpose(1, 2).transpose(2, 3)
q = q * self.scale
x = torch.matmul(q, k)
if attn_bias is not None:
x = x + attn_bias
x = torch.softmax(x, dim=3)
x = self.att_dropout(x)
x = x.matmul(v)
x = x.transpose(1, 2).contiguous()
x = x.view(batch_size, -1, self.num_heads * d_v)
x = self.output_layer(x)
assert x.size() == orig_q_size
return x
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'hidden_size': 4, 'attention_dropout_rate': 0.5,
'num_heads': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_mul_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_mul_0[grid(16, 4)](buf0, primals_3, buf3, 16,
4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_clone_1[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf1
triton_poi_fused_clone_1[grid(16, 4)](buf2, primals_8, buf8, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_8
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_4[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0)
del buf9
extern_kernels.addmm(primals_11, reinterpret_tensor(buf10, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf11)
del primals_11
return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf10, (16, 4), (4, 1), 0
), primals_10, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)
class MultiHeadAttentionNew(nn.Module):
def __init__(self, hidden_size, attention_dropout_rate, num_heads):
super(MultiHeadAttentionNew, self).__init__()
self.num_heads = num_heads
self.att_size = att_size = hidden_size // num_heads
self.scale = att_size ** -0.5
self.linear_q = nn.Linear(hidden_size, num_heads * att_size)
self.linear_k = nn.Linear(hidden_size, num_heads * att_size)
self.linear_v = nn.Linear(hidden_size, num_heads * att_size)
self.att_dropout = nn.Dropout(attention_dropout_rate)
self.output_layer = nn.Linear(num_heads * att_size, hidden_size)
def forward(self, input_0, input_1, input_2):
primals_2 = self.linear_q.weight
primals_3 = self.linear_q.bias
primals_4 = self.linear_k.weight
primals_5 = self.linear_k.bias
primals_7 = self.linear_v.weight
primals_8 = self.linear_v.bias
primals_10 = self.output_layer.weight
primals_11 = self.output_layer.bias
primals_1 = input_0
primals_6 = input_1
primals_9 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
ChantalMP/Graphormer
|
MultiHeadAttention
| false
| 8,934
|
[
"MIT"
] | 0
|
5c384d0f2840afc88ee88aeb874f4b1f41d760bf
|
https://github.com/ChantalMP/Graphormer/tree/5c384d0f2840afc88ee88aeb874f4b1f41d760bf
|
TVLoss
|
import torch
import torch.nn as nn
class TVLoss(nn.Module):
def __init__(self, strength):
super(TVLoss, self).__init__()
self.strength = strength
def forward(self, input):
self.x_diff = input[:, :, 1:, :] - input[:, :, :-1, :]
self.y_diff = input[:, :, :, 1:] - input[:, :, :, :-1]
self.loss = self.strength * (torch.sum(torch.abs(self.x_diff)) +
torch.sum(torch.abs(self.y_diff)))
return input
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'strength': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_add_mul_sub_sum_0(in_out_ptr0, in_ptr0, out_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r0 = rindex % 12
r1 = rindex // 12
r2 = rindex
r3 = rindex % 3
r4 = rindex // 3
tmp0 = tl.load(in_ptr0 + (4 + r0 + 16 * r1), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (r0 + 16 * r1), rmask, other=0.0)
tmp8 = tl.load(in_ptr0 + (1 + r3 + 4 * r4), rmask, other=0.0)
tmp9 = tl.load(in_ptr0 + (r3 + 4 * r4), rmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp10 = tmp8 - tmp9
tmp11 = tl_math.abs(tmp10)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.where(rmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = tmp7 + tmp15
tmp17 = 4.0
tmp18 = tmp16 * tmp17
tl.store(out_ptr0 + tl.broadcast_to(r2, [XBLOCK, RBLOCK]), tmp2, rmask)
tl.store(out_ptr1 + tl.broadcast_to(r2, [XBLOCK, RBLOCK]), tmp10, rmask)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp18, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 3, 4), (48, 12, 4, 1), torch.float32)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 3), (48, 12, 3, 1), torch.float32)
buf4 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_abs_add_mul_sub_sum_0[grid(1)](buf4, arg0_1, buf0,
buf2, 1, 192, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf4, buf2, buf0
class TVLossNew(nn.Module):
def __init__(self, strength):
super(TVLossNew, self).__init__()
self.strength = strength
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
DarekGit/neural_style
|
TVLoss
| false
| 8,935
|
[
"MIT"
] | 0
|
461f0d791f23e82bbf0adcecf5630854ccac9944
|
https://github.com/DarekGit/neural_style/tree/461f0d791f23e82bbf0adcecf5630854ccac9944
|
ScaledDotProductAttention
|
import math
import torch
from torch import nn
class ScaledDotProductAttention(nn.Module):
def __init__(self, d_k):
super().__init__()
self.dropout = nn.Dropout(0.5)
self.sqrt_d_k = math.sqrt(d_k)
def forward(self, Q, K, V):
attn = torch.bmm(Q, K.transpose(2, 1))
attn = attn / self.sqrt_d_k
attn = torch.softmax(attn, dim=-1)
attn = self.dropout(attn)
y = torch.bmm(attn, V)
return y, attn
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'d_k': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (
16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = buf1
del buf1
extern_kernels.bmm(buf2, arg2_1, out=buf3)
del arg2_1
return buf3, buf2
class ScaledDotProductAttentionNew(nn.Module):
def __init__(self, d_k):
super().__init__()
self.dropout = nn.Dropout(0.5)
self.sqrt_d_k = math.sqrt(d_k)
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1]
|
DaanG96/breakfastDSNet
|
ScaledDotProductAttention
| false
| 8,936
|
[
"MIT"
] | 0
|
17a146ef5ad077e935e6f4b773e0a1f605f76a78
|
https://github.com/DaanG96/breakfastDSNet/tree/17a146ef5ad077e935e6f4b773e0a1f605f76a78
|
TorchModel
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class TorchModel(nn.Module):
def __init__(self):
super(TorchModel, self).__init__()
self.conv1 = nn.Conv2d(1, 20, 5)
self.conv2 = nn.Conv2d(20, 20, 5)
def forward(self, x):
x = F.relu(self.conv1(x))
return F.relu(self.conv2(x))
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 288000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3600 % 20
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 250880
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = xindex // 3136 % 20
x0 = xindex % 3136
x3 = xindex // 3136
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x0 + 3200 * x3), tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (20, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (20,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (20, 20, 5, 5), (500, 25, 5, 1))
assert_size_stride(primals_5, (20,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 20, 60, 60), (72000, 3600, 60, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(288000)](buf1, primals_2,
288000, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 20, 56, 56), (62720, 3136, 56, 1))
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 20, 56, 56), (64000, 3200, 56, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_1[grid(250880)](
buf3, primals_5, buf4, 250880, XBLOCK=512, num_warps=8,
num_stages=1)
del primals_5
return buf3, primals_1, primals_3, primals_4, buf1, buf4
class TorchModelNew(nn.Module):
def __init__(self):
super(TorchModelNew, self).__init__()
self.conv1 = nn.Conv2d(1, 20, 5)
self.conv2 = nn.Conv2d(20, 20, 5)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
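# A minimal sketch of the size bookkeeping behind the two fused kernels above,
# assuming the 4x1x64x64 input from get_inputs(): each valid 5x5 convolution
# shrinks the spatial dims by 4, which is where the hard-coded xnumel values come from.
h1 = 64 - 5 + 1                       # 60 after conv1
h2 = h1 - 5 + 1                       # 56 after conv2
assert 4 * 20 * h1 * h1 == 288000     # xnumel of triton_poi_fused_convolution_relu_0
assert 4 * 20 * h2 * h2 == 250880     # xnumel of the second fused kernel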
 | DLPerf/elasticdl | TorchModel | false | 8,937 | ["MIT"] | 0 | b9c03ea0e81861ae8d349c3d8ffd1f7b588b910b | https://github.com/DLPerf/elasticdl/tree/b9c03ea0e81861ae8d349c3d8ffd1f7b588b910b |
ScaleNorm
|
import torch
import torch.nn as nn
class ScaleNorm(nn.Module):
def __init__(self, dim, eps=1e-05):
super().__init__()
self.g = nn.Parameter(torch.ones(1))
self.eps = eps
def forward(self, x):
n = torch.norm(x, dim=-1, keepdim=True).clamp(min=self.eps)
return x / n * self.g
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_div_linalg_vector_norm_mul_0(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + 0)
tmp17 = tl.broadcast_to(tmp16, [XBLOCK])
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-05
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tmp18 = tmp15 * tmp17
tl.store(out_ptr0 + x2, tmp18, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_div_linalg_vector_norm_mul_0[grid(256)](
primals_1, primals_2, buf0, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_2
return buf0, primals_1
class ScaleNormNew(nn.Module):
def __init__(self, dim, eps=1e-05):
super().__init__()
self.g = nn.Parameter(torch.ones(1))
self.eps = eps
def forward(self, input_0):
primals_2 = self.g
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
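# A minimal eager sketch of the single fused kernel above (standard PyTorch only):
# square-accumulate-sqrt over the last dim, clamp at eps, then x / norm * g.
import torch
x = torch.rand([4, 4, 4, 4])
g, eps = torch.ones(1), 1e-05
fused = x / torch.sqrt((x * x).sum(dim=-1, keepdim=True)).clamp(min=eps) * g
ref = x / torch.norm(x, dim=-1, keepdim=True).clamp(min=eps) * g
print(torch.allclose(fused, ref, atol=1e-6))  # expected: True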
 | AniketRajpoot/reformer-pytorch | ScaleNorm | false | 8,938 | ["MIT"] | 0 | 06b131eb383e7a3a184b7038ef20fe614958216f | https://github.com/AniketRajpoot/reformer-pytorch/tree/06b131eb383e7a3a184b7038ef20fe614958216f |
ScaledDotProductAttention
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ScaledDotProductAttention(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=2)
def forward(self, q, k, v):
attn = torch.bmm(q, k.transpose(1, 2))
attn = attn / self.temperature
log_attn = F.log_softmax(attn, 2)
attn = self.softmax(attn)
attn = self.dropout(attn)
output = torch.bmm(attn, v)
return output, attn, log_attn
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'temperature': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (
16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf0, buf1, buf4, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = buf1
del buf1
extern_kernels.bmm(buf2, arg2_1, out=buf3)
del arg2_1
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_2[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf4
return buf3, buf2, buf5
class ScaledDotProductAttentionNew(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=2)
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1], output[2]
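# A minimal eager sketch of the fused softmax algebra above (standard PyTorch only):
# the kernel subtracts the raw row max first and applies the 0.25 = 1/temperature
# factor afterwards, which equals softmax(scores / temperature) because
# max(s) / 4 == max(s / 4).
import torch
q, k, v = (torch.rand([4, 4, 4]) for _ in range(3))
s = torch.bmm(q, k.transpose(1, 2))
fused = torch.exp((s - s.max(dim=2, keepdim=True).values) * 0.25)
fused = fused / fused.sum(dim=2, keepdim=True)
print(torch.allclose(fused, torch.softmax(s / 4, dim=2), atol=1e-6))  # expected: True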
 | Blair129/FEAT-master | ScaledDotProductAttention | false | 8,939 | ["MIT"] | 0 | 459e05000a8cca5421fafb7d2f33f19418378df7 | https://github.com/Blair129/FEAT-master/tree/459e05000a8cca5421fafb7d2f33f19418378df7 |
VAE
|
import torch
import torch.nn.functional as F
from torch import nn
class VAE(nn.Module):
"""A classic VAE.
Params
------
input_dim : int
The size of the (flattened) image vector
latent_dim : int
The size of the latent memory
"""
def __init__(self, input_dim=784, latent_dim=20):
super(VAE, self).__init__()
self.input_dim = int(input_dim)
self.latent_dim = int(latent_dim)
self.fc1 = nn.Linear(self.input_dim, 400)
self.fc21 = nn.Linear(400, self.latent_dim)
self.fc22 = nn.Linear(400, self.latent_dim)
self.fc3 = nn.Linear(self.latent_dim, 400)
self.fc4 = nn.Linear(400, self.input_dim)
def encode(self, x):
"""Encode a torch tensor (batch_size, input_size)"""
x = x.view(-1, self.input_dim)
h1 = F.relu(self.fc1(x))
return self.fc21(h1), self.fc22(h1)
def _reparameterize(self, mu, logvar):
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return mu + eps * std
def decode(self, z):
"""Expand a latent memory, to input_size."""
h3 = F.relu(self.fc3(z))
return torch.sigmoid(self.fc4(h3))
def sample(self, n, device=None):
"""Use noise to sample n images from latent space."""
with torch.no_grad():
x = torch.randn(n, self.latent_dim)
x = x
samples = self.decode(x)
return samples
def forward(self, x):
"""Get a reconstructed image"""
mu, logvar = self.encode(x)
z = self._reparameterize(mu, logvar)
return self.decode(z), mu, logvar
def get_inputs():
return [torch.rand([4, 784])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch import device
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn.functional as F
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 400
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask)
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 * tmp5
tmp7 = tmp0 + tmp6
tl.store(out_ptr0 + x0, tmp7, xmask)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 3136
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 784
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 784), (784, 1))
assert_size_stride(primals_2, (400, 784), (784, 1))
assert_size_stride(primals_3, (400,), (1,))
assert_size_stride(primals_4, (20, 400), (400, 1))
assert_size_stride(primals_5, (20,), (1,))
assert_size_stride(primals_6, (20, 400), (400, 1))
assert_size_stride(primals_7, (20,), (1,))
assert_size_stride(primals_8, (400, 20), (20, 1))
assert_size_stride(primals_9, (400,), (1,))
assert_size_stride(primals_10, (784, 400), (400, 1))
assert_size_stride(primals_11, (784,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 400), (400, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784,
400), (1, 784), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(1600)](buf1, primals_3, 1600, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 20), (20, 1), torch.float32)
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4,
(400, 20), (1, 400), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 20), (20, 1), torch.float32)
extern_kernels.addmm(primals_7, buf1, reinterpret_tensor(primals_6,
(400, 20), (1, 400), 0), alpha=1, beta=1, out=buf3)
del primals_7
buf4 = torch.ops.aten.randn.default([4, 20], dtype=torch.float32,
device=device(type='cuda', index=0), pin_memory=False)
buf5 = buf4
del buf4
buf6 = empty_strided_cuda((4, 20), (20, 1), torch.float32)
triton_poi_fused_add_exp_mul_1[grid(80)](buf2, buf5, buf3, buf6, 80,
XBLOCK=128, num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((4, 400), (400, 1), torch.float32)
extern_kernels.mm(buf6, reinterpret_tensor(primals_8, (20, 400), (1,
20), 0), out=buf7)
buf8 = buf7
del buf7
triton_poi_fused_relu_0[grid(1600)](buf8, primals_9, 1600, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_9
buf9 = empty_strided_cuda((4, 784), (784, 1), torch.float32)
extern_kernels.mm(buf8, reinterpret_tensor(primals_10, (400, 784),
(1, 400), 0), out=buf9)
buf10 = buf9
del buf9
triton_poi_fused_sigmoid_2[grid(3136)](buf10, primals_11, 3136,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
return (buf10, buf2, buf3, primals_1, buf1, buf3, buf5, buf6, buf8,
buf10, primals_10, primals_8, primals_6, primals_4)
class VAENew(nn.Module):
"""A classic VAE.
Params
------
input_dim : int
The size of the (flattened) image vector
latent_dim : int
The size of the latent memory
"""
def __init__(self, input_dim=784, latent_dim=20):
super(VAENew, self).__init__()
self.input_dim = int(input_dim)
self.latent_dim = int(latent_dim)
self.fc1 = nn.Linear(self.input_dim, 400)
self.fc21 = nn.Linear(400, self.latent_dim)
self.fc22 = nn.Linear(400, self.latent_dim)
self.fc3 = nn.Linear(self.latent_dim, 400)
self.fc4 = nn.Linear(400, self.input_dim)
def encode(self, x):
"""Encode a torch tensor (batch_size, input_size)"""
x = x.view(-1, self.input_dim)
h1 = F.relu(self.fc1(x))
return self.fc21(h1), self.fc22(h1)
def _reparameterize(self, mu, logvar):
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return mu + eps * std
def decode(self, z):
"""Expand a latent memory, to input_size."""
h3 = F.relu(self.fc3(z))
return torch.sigmoid(self.fc4(h3))
def sample(self, n, device=None):
"""Use noise to sample n images from latent space."""
with torch.no_grad():
x = torch.randn(n, self.latent_dim)
x = x
samples = self.decode(x)
return samples
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc21.weight
primals_5 = self.fc21.bias
primals_6 = self.fc22.weight
primals_7 = self.fc22.bias
primals_8 = self.fc3.weight
primals_9 = self.fc3.bias
primals_10 = self.fc4.weight
primals_11 = self.fc4.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0], output[1], output[2]
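# A minimal eager sketch of the fused reparameterization kernel above
# (triton_poi_fused_add_exp_mul_1), standard PyTorch only:
# z = mu + eps * exp(0.5 * logvar), with eps drawn via aten.randn inside call().
import torch
mu, logvar = torch.rand([4, 20]), torch.rand([4, 20])
eps = torch.randn([4, 20])
z = mu + eps * torch.exp(0.5 * logvar)
print(z.shape)  # torch.Size([4, 20]), the shape of buf6 in call()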
 | CoAxLab/newremagine | VAE | false | 8,940 | ["MIT"] | 0 | 5ae1c579121c93271ebf5dcef45bd66e8daea3a7 | https://github.com/CoAxLab/newremagine/tree/5ae1c579121c93271ebf5dcef45bd66e8daea3a7 |
ResidualSequential
|
import torch
import torch.nn as nn
import torch.nn.init
class ResidualSequential(nn.Sequential):
def __init__(self, *args):
super(ResidualSequential, self).__init__(*args)
def forward(self, x):
out = super(ResidualSequential, self).forward(x)
x_ = None
if out.size(2) != x.size(2) or out.size(3) != x.size(3):
diff2 = x.size(2) - out.size(2)
diff3 = x.size(3) - out.size(3)
            x_ = x[:, :, diff2 // 2:out.size(2) + diff2 // 2, diff3 // 2:
                out.size(3) + diff3 // 2]
else:
x_ = x
return out + x_
def eval(self):
None
for m in self.modules():
m.eval()
exit()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 + tmp0
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ResidualSequentialNew(nn.Sequential):
def __init__(self, *args):
super(ResidualSequentialNew, self).__init__(*args)
def eval(self):
None
for m in self.modules():
m.eval()
exit()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
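# A minimal eager sketch (standard PyTorch only): with no child modules, as in
# get_init_inputs(), the residual forward collapses to out + x == x + x, which is
# exactly what triton_poi_fused_add_0 computes (tmp1 = tmp0 + tmp0).
import torch
import torch.nn as nn
x = torch.rand([4, 4, 4, 4])
print(torch.equal(nn.Sequential()(x) + x, x + x))  # expected: True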
 | DDQXZcp/FYP_ProjectFile_TANG_Zhiheng | ResidualSequential | false | 8,941 | ["MIT"] | 0 | b0e3b9d1c5cee61e1d09a32e405244bda09b6f0d | https://github.com/DDQXZcp/FYP_ProjectFile_TANG_Zhiheng/tree/b0e3b9d1c5cee61e1d09a32e405244bda09b6f0d |
Hsigmoid
|
import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
class Hsigmoid(nn.Module):
def __init__(self, inplace=True):
super(Hsigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return F.relu6(x + 3.0, inplace=self.inplace) / 6.0
def __repr__(self):
return 'Hsigmoid()'
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
import torch.nn.parallel
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_hardtanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 0.16666666666666666
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class HsigmoidNew(nn.Module):
def __init__(self, inplace=True):
super(HsigmoidNew, self).__init__()
self.inplace = inplace
def __repr__(self):
return 'Hsigmoid()'
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
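# A minimal eager sketch of the fused hard-sigmoid above (standard PyTorch only):
# clamp(x + 3, 0, 6) / 6, so x <= -3 maps to 0, x = 0 to 0.5, and x >= 3 to 1.
import torch
x = torch.tensor([-4.0, -3.0, 0.0, 3.0, 4.0])
y = torch.clamp(x + 3.0, 0.0, 6.0) / 6.0
print(y)  # tensor([0.0000, 0.0000, 0.5000, 1.0000, 1.0000])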
 | AlbertiPot/once-for-all | Hsigmoid | false | 8,942 | ["MIT"] | 0 | 092b9e6184be353383396761ea5ec61d67152645 | https://github.com/AlbertiPot/once-for-all/tree/092b9e6184be353383396761ea5ec61d67152645 |
Flatten
|
import torch
from torch import nn
class Flatten(nn.Module):
def __init__(self):
super(Flatten, self).__init__()
def forward(self, x):
"""
Arguments:
x: a float tensor with shape [batch_size, c, h, w].
Returns:
a float tensor with shape [batch_size, c*h*w].
"""
x = x.transpose(3, 2).contiguous()
return x.view(x.size(0), -1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64, 4)](arg0_1, buf0, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 64), (64, 1), 0),
class FlattenNew(nn.Module):
def __init__(self):
super(FlattenNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
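# A minimal eager sketch of what the clone kernel above materializes (standard
# PyTorch only): the last two axes are swapped before flattening, so the first
# four entries of row 0 are x[0, 0, :, 0] rather than x[0, 0, 0, :].
import torch
x = torch.rand([4, 4, 4, 4])
ref = x.transpose(3, 2).contiguous().view(4, -1)
print(ref.shape)                                # torch.Size([4, 64])
print(torch.equal(ref[0, :4], x[0, 0, :, 0]))   # expected: True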
 | DebugVBZ/pixel2style2pixel | Flatten | false | 8,943 | ["MIT"] | 0 | e884c0cf471ad9ee09b8743d7ffd532283a638e5 | https://github.com/DebugVBZ/pixel2style2pixel/tree/e884c0cf471ad9ee09b8743d7ffd532283a638e5 |
GenNoise
|
import torch
import torch.nn as nn
import torch.nn.init
class GenNoise(nn.Module):
def __init__(self, dim2):
super(GenNoise, self).__init__()
self.dim2 = dim2
def forward(self, input):
a = list(input.size())
a[1] = self.dim2
b = torch.zeros(a).type_as(input.data)
b.normal_()
x = torch.autograd.Variable(b)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim2': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__to_copy_0[grid(256)](buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf1 = torch.ops.aten.normal_functional.default(buf0)
del buf0
buf2 = buf1
del buf1
return buf2,
class GenNoiseNew(nn.Module):
def __init__(self, dim2):
super(GenNoiseNew, self).__init__()
self.dim2 = dim2
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
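# A minimal eager sketch of the compiled behaviour above (standard PyTorch only):
# the input contributes only its shape and dtype; the output is fresh N(0, 1)
# noise with dim 1 replaced by dim2 (here dim2 == 4, so the shape is unchanged).
import torch
x = torch.rand([4, 4, 4, 4])
noise = torch.zeros([4, 4, 4, 4]).type_as(x).normal_()
print(noise.shape, noise.dtype)  # torch.Size([4, 4, 4, 4]) torch.float32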
 | DDQXZcp/FYP_ProjectFile_TANG_Zhiheng | GenNoise | false | 8,944 | ["MIT"] | 0 | b0e3b9d1c5cee61e1d09a32e405244bda09b6f0d | https://github.com/DDQXZcp/FYP_ProjectFile_TANG_Zhiheng/tree/b0e3b9d1c5cee61e1d09a32e405244bda09b6f0d |
AttentionScore
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class AttentionScore(nn.Module):
"""
correlation_func = 1, sij = x1^Tx2
correlation_func = 2, sij = (Wx1)D(Wx2)
correlation_func = 3, sij = Relu(Wx1)DRelu(Wx2)
correlation_func = 4, sij = x1^TWx2
    correlation_func = 5, sij = Relu(Wx1)^TRelu(Wx2)
"""
def __init__(self, input_size, hidden_size, correlation_func=1,
do_similarity=False):
super(AttentionScore, self).__init__()
self.correlation_func = correlation_func
self.hidden_size = hidden_size
if correlation_func == 2 or correlation_func == 3:
self.linear = nn.Linear(input_size, hidden_size, bias=False)
if do_similarity:
                self.diagonal = nn.Parameter(torch.ones(1, 1, 1) / hidden_size **
0.5, requires_grad=False)
else:
                self.diagonal = nn.Parameter(torch.ones(1, 1, hidden_size),
requires_grad=True)
if correlation_func == 4:
self.linear = nn.Linear(input_size, input_size, bias=False)
if correlation_func == 5:
self.linear = nn.Linear(input_size, hidden_size, bias=False)
def forward(self, x1, x2, x2_mask):
"""
Input:
x1: batch x word_num1 x dim
x2: batch x word_num2 x dim
Output:
scores: batch x word_num1 x word_num2
"""
x1_rep = x1
x2_rep = x2
batch = x1_rep.size(0)
word_num1 = x1_rep.size(1)
word_num2 = x2_rep.size(1)
dim = x1_rep.size(2)
if self.correlation_func == 2 or self.correlation_func == 3:
x1_rep = self.linear(x1_rep.contiguous().view(-1, dim)).view(batch,
word_num1, self.hidden_size)
x2_rep = self.linear(x2_rep.contiguous().view(-1, dim)).view(batch,
word_num2, self.hidden_size)
if self.correlation_func == 3:
x1_rep = F.relu(x1_rep)
x2_rep = F.relu(x2_rep)
x1_rep = x1_rep * self.diagonal.expand_as(x1_rep)
if self.correlation_func == 4:
x2_rep = self.linear(x2_rep.contiguous().view(-1, dim)).view(batch,
word_num2, dim)
if self.correlation_func == 5:
x1_rep = self.linear(x1_rep.contiguous().view(-1, dim)).view(batch,
word_num1, self.hidden_size)
x2_rep = self.linear(x2_rep.contiguous().view(-1, dim)).view(batch,
word_num2, self.hidden_size)
x1_rep = F.relu(x1_rep)
x2_rep = F.relu(x2_rep)
scores = x1_rep.bmm(x2_rep.transpose(1, 2))
empty_mask = x2_mask.eq(0).expand_as(scores)
scores.data.masked_fill_(empty_mask.data, -float('inf'))
alpha_flat = F.softmax(scores, dim=-1)
return alpha_flat
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_masked_fill_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tmp4 = float('-inf')
tmp5 = tl.where(tmp2, tmp4, tmp3)
tmp7 = tmp6 == tmp1
tmp9 = tl.where(tmp7, tmp4, tmp8)
tmp10 = triton_helpers.maximum(tmp5, tmp9)
tmp12 = tmp11 == tmp1
tmp14 = tl.where(tmp12, tmp4, tmp13)
tmp15 = triton_helpers.maximum(tmp10, tmp14)
tmp17 = tmp16 == tmp1
tmp19 = tl.where(tmp17, tmp4, tmp18)
tmp20 = triton_helpers.maximum(tmp15, tmp19)
tmp21 = tmp5 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp9 - tmp20
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp20
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp19 - tmp20
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tl.store(out_ptr0 + x2, tmp20, xmask)
tl.store(out_ptr1 + x2, tmp31, xmask)
@triton.jit
def triton_poi_fused__softmax_masked_fill_1(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 16
x4 = xindex
x5 = xindex // 4
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr0 + x4, xmask)
tmp6 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tmp4 = float('-inf')
tmp5 = tl.where(tmp2, tmp4, tmp3)
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tl.store(in_out_ptr0 + x4, tmp10, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(arg0_1, reinterpret_tensor(arg1_1, (4, 4, 4), (
16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_masked_fill_0[grid(16)](arg2_1, buf0,
buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf3 = buf0
del buf0
triton_poi_fused__softmax_masked_fill_1[grid(64)](buf3, arg2_1,
buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg2_1
del buf1
del buf2
return buf3,
class AttentionScoreNew(nn.Module):
"""
correlation_func = 1, sij = x1^Tx2
correlation_func = 2, sij = (Wx1)D(Wx2)
correlation_func = 3, sij = Relu(Wx1)DRelu(Wx2)
correlation_func = 4, sij = x1^TWx2
    correlation_func = 5, sij = Relu(Wx1)^TRelu(Wx2)
"""
def __init__(self, input_size, hidden_size, correlation_func=1,
do_similarity=False):
super(AttentionScoreNew, self).__init__()
self.correlation_func = correlation_func
self.hidden_size = hidden_size
if correlation_func == 2 or correlation_func == 3:
self.linear = nn.Linear(input_size, hidden_size, bias=False)
if do_similarity:
                self.diagonal = nn.Parameter(torch.ones(1, 1, 1) / hidden_size **
0.5, requires_grad=False)
else:
                self.diagonal = nn.Parameter(torch.ones(1, 1, hidden_size),
requires_grad=True)
if correlation_func == 4:
self.linear = nn.Linear(input_size, input_size, bias=False)
if correlation_func == 5:
self.linear = nn.Linear(input_size, hidden_size, bias=False)
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
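# A minimal eager sketch of the compiled default path (correlation_func=1),
# standard PyTorch only: raw dot-product scores, -inf where the broadcast
# x2_mask is zero, then a softmax over the last dim, as in the two fused kernels.
import torch
import torch.nn.functional as F
x1, x2, x2_mask = torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4])
scores = x1.bmm(x2.transpose(1, 2))
scores = scores.masked_fill(x2_mask.eq(0).expand_as(scores), float('-inf'))
print(F.softmax(scores, dim=-1).shape)  # torch.Size([4, 4, 4])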
 | BruceWen120/neurips-reproducibility-challenge-2019 | AttentionScore | false | 8,945 | ["Apache-2.0"] | 0 | b0635aefe83e3f895ce0991913824e861bb7d02d | https://github.com/BruceWen120/neurips-reproducibility-challenge-2019/tree/b0635aefe83e3f895ce0991913824e861bb7d02d |
MyGlobalAvgPool2d
|
import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.parallel
import torch.optim
class MyGlobalAvgPool2d(nn.Module):
def __init__(self, keep_dim=True):
super(MyGlobalAvgPool2d, self).__init__()
self.keep_dim = keep_dim
def forward(self, x):
return x.mean(3, keepdim=self.keep_dim).mean(2, keepdim=self.keep_dim)
def __repr__(self):
return 'MyGlobalAvgPool2d(keep_dim=%s)' % self.keep_dim
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
import torch.nn.parallel
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp10 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp30 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp32 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = tmp15 / tmp7
tmp17 = tmp8 + tmp16
tmp20 = tmp18 + tmp19
tmp22 = tmp20 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = tmp24 / tmp7
tmp26 = tmp17 + tmp25
tmp29 = tmp27 + tmp28
tmp31 = tmp29 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = tmp33 / tmp7
tmp35 = tmp26 + tmp34
tmp36 = tmp35 / tmp7
tl.store(out_ptr0 + x0, tmp36, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class MyGlobalAvgPool2dNew(nn.Module):
def __init__(self, keep_dim=True):
super(MyGlobalAvgPool2dNew, self).__init__()
self.keep_dim = keep_dim
def __repr__(self):
return 'MyGlobalAvgPool2d(keep_dim=%s)' % self.keep_dim
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
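# A minimal eager sketch of the single fused mean kernel above (standard PyTorch
# only): averaging dim 3 and then dim 2 with equal weights is a global average
# pool, so the result matches adaptive_avg_pool2d with output size 1.
import torch
import torch.nn.functional as F
x = torch.rand([4, 4, 4, 4])
ref = x.mean(3, keepdim=True).mean(2, keepdim=True)
print(torch.allclose(ref, F.adaptive_avg_pool2d(x, 1), atol=1e-6))  # expected: True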
 | AlbertiPot/once-for-all | MyGlobalAvgPool2d | false | 8,946 | ["MIT"] | 0 | 092b9e6184be353383396761ea5ec61d67152645 | https://github.com/AlbertiPot/once-for-all/tree/092b9e6184be353383396761ea5ec61d67152645 |
Hswish
|
import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
class Hswish(nn.Module):
def __init__(self, inplace=True):
super(Hswish, self).__init__()
self.inplace = inplace
def forward(self, x):
return x * F.relu6(x + 3.0, inplace=self.inplace) / 6.0
def __repr__(self):
return 'Hswish()'
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
import torch.nn.parallel
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = tmp0 * tmp6
tmp8 = 0.16666666666666666
tmp9 = tmp7 * tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_mul_0[grid(256)](arg0_1, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class HswishNew(nn.Module):
def __init__(self, inplace=True):
super(HswishNew, self).__init__()
self.inplace = inplace
def __repr__(self):
return 'Hswish()'
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
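# A minimal eager sketch of the fused hard-swish above (standard PyTorch only):
# x * clamp(x + 3, 0, 6) / 6, which should match F.hardswish (assuming a PyTorch
# version >= 1.6, where that function exists).
import torch
import torch.nn.functional as F
x = torch.linspace(-5.0, 5.0, 11)
y = x * torch.clamp(x + 3.0, 0.0, 6.0) / 6.0
print(torch.allclose(y, F.hardswish(x), atol=1e-6))  # expected: True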
 | AlbertiPot/once-for-all | Hswish | false | 8,947 | ["MIT"] | 0 | 092b9e6184be353383396761ea5ec61d67152645 | https://github.com/AlbertiPot/once-for-all/tree/092b9e6184be353383396761ea5ec61d67152645 |
HuberLoss
|
import torch
import torch.nn as nn
import torch.utils.data
class HuberLoss(nn.Module):
def __init__(self, delta=1):
super().__init__()
self.huber_loss_delta1 = nn.SmoothL1Loss()
self.delta = delta
def forward(self, x, x_hat):
loss = self.huber_loss_delta1(x / self.delta, x_hat / self.delta)
return loss * self.delta * self.delta
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_mul_smooth_l1_loss_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 - tmp4
tmp6 = tl_math.abs(tmp5)
tmp7 = tmp6 < tmp1
tmp8 = tmp6 * tmp6
tmp9 = 0.5
tmp10 = tmp8 * tmp9
tmp11 = tmp10 * tmp1
tmp12 = tmp6 - tmp9
tmp13 = tl.where(tmp7, tmp11, tmp12)
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp17 = 256.0
tmp18 = tmp16 / tmp17
tmp19 = tmp18 * tmp1
tmp20 = tmp19 * tmp1
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp20, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_mul_smooth_l1_loss_0[grid(1)](buf1, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class HuberLossNew(nn.Module):
def __init__(self, delta=1):
super().__init__()
self.huber_loss_delta1 = nn.SmoothL1Loss()
self.delta = delta
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
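# A minimal eager sketch of the scaling trick above (standard PyTorch only):
# SmoothL1(x/d, y/d) * d * d is the Huber loss with threshold d, so for d = 2 it
# should match nn.HuberLoss(delta=2), assuming PyTorch >= 1.9 where that module exists.
import torch
import torch.nn as nn
x, y = torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]) * 5
d = 2.0
scaled = nn.SmoothL1Loss()(x / d, y / d) * d * d
print(torch.allclose(scaled, nn.HuberLoss(delta=d)(x, y), atol=1e-6))  # expected: True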
 | Altriaex/d4rl_evaluations | HuberLoss | false | 8,948 | ["Apache-2.0"] | 0 | ceb34c04e98af9332c6338a1414c0c2aa5fea68b | https://github.com/Altriaex/d4rl_evaluations/tree/ceb34c04e98af9332c6338a1414c0c2aa5fea68b |
Block
|
import math
import torch
import torch.nn as nn
class MLP(nn.Module):
def __init__(self, embedding_size):
super(MLP, self).__init__()
self.dense_h_to_4h = nn.Linear(embedding_size, embedding_size * 4)
self.dense_4h_to_h = nn.Linear(embedding_size * 4, embedding_size)
self.act = nn.functional.gelu
def forward(self, x):
h = self.act(self.dense_h_to_4h(x))
h2 = self.dense_4h_to_h(h)
return h2
class Attention(nn.Module):
def __init__(self, embedding_size, num_attention_heads,
attention_dropout, residual_dropout):
super(Attention, self).__init__()
self.num_attention_heads = num_attention_heads
self.size_per_head = embedding_size // num_attention_heads
self.embedding_size = embedding_size
self.query_key_value = nn.Linear(embedding_size, embedding_size * 3)
self.attn_drop = nn.Dropout(attention_dropout)
self.resid_drop = nn.Dropout(residual_dropout)
self.dense = nn.Linear(embedding_size, embedding_size)
def split_heads(self, x):
x = x.reshape([-1, self.seq_len, self.num_attention_heads, self.
size_per_head])
return x.permute(0, 2, 1, 3)
def forward(self, x, kv_cache=None):
self.seq_len = x.shape[1]
x = self.query_key_value(x)
q, k, v = torch.split(x, split_size_or_sections=self.embedding_size,
dim=2)
q = self.split_heads(q)
k = self.split_heads(k)
v = self.split_heads(v)
if kv_cache is not None:
pk, pv = kv_cache[0], kv_cache[1]
k = torch.cat([pk, k], dim=-2)
v = torch.cat([pv, v], dim=-2)
cached_kv = torch.stack([k, v])
attn = torch.matmul(q, k.transpose(-1, -2))
attn = attn / math.sqrt(self.size_per_head)
attention_mask = torch.tril(torch.ones(self.seq_len, self.seq_len,
dtype=torch.float32, device=x.device))
attention_mask = attention_mask.reshape([1, 1, self.seq_len, self.
seq_len])
attn = attn * attention_mask - 10000.0 * (1.0 - attention_mask)
attn = nn.Softmax(dim=-1)(attn)
attn = self.attn_drop(attn)
y = torch.matmul(attn, v)
y = y.permute(0, 2, 1, 3)
y = torch.reshape(y, [-1, self.seq_len, self.embedding_size])
y = self.resid_drop(self.dense(y))
return y, cached_kv
class Block(nn.Module):
def __init__(self, embedding_size, num_attention_heads,
attention_dropout, residual_dropout):
super(Block, self).__init__()
self.input_layernorm = nn.LayerNorm(embedding_size, eps=1e-05)
self.attention = Attention(embedding_size, num_attention_heads,
attention_dropout, residual_dropout)
self.post_attention_layernorm = nn.LayerNorm(embedding_size, eps=1e-05)
self.mlp = MLP(embedding_size)
def forward(self, x, kv_cache=None):
attn, cached_kv = self.attention(self.input_layernorm(x), kv_cache=
kv_cache)
x = x + attn
z = self.post_attention_layernorm(x)
z = self.mlp(z)
x = x + z
return x, cached_kv
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'embedding_size': 4, 'num_attention_heads': 4,
'attention_dropout': 0.5, 'residual_dropout': 0.5}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_stack_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x0 = xindex % 4
x1 = xindex // 4 % 4
x3 = xindex
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 + x0 + 12 * x1 + 48 * x2), tmp4 & xmask,
other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr0 + (8 + x0 + 12 * x1 + 48 * (-4 + x2)), tmp6 &
xmask, other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (4 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_tril_5(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = x0 + -1 * x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 <= tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
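# A minimal eager sketch of the mask this kernel materializes (standard PyTorch
# only): entry (row, col) is True when col - row <= 0, i.e. the lower-triangular
# causal mask that torch.tril(torch.ones(n, n)) builds in the original module.
import torch
rows = torch.arange(4).unsqueeze(1)
cols = torch.arange(4).unsqueeze(0)
mask = (cols - rows) <= 0
print(torch.equal(mask, torch.tril(torch.ones(4, 4)).bool()))  # expected: True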
@triton.jit
def triton_poi_fused__softmax_div_mul_rsub_sub_6(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp23 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp33 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = -1 * x0
tmp4 = tl.full([1], 0, tl.int64)
tmp5 = tmp3 <= tmp4
tmp6 = 0.0
tmp7 = tl.where(tmp5, tmp1, tmp6)
tmp8 = tmp2 * tmp7
tmp9 = tmp1 - tmp7
tmp10 = 10000.0
tmp11 = tmp9 * tmp10
tmp12 = tmp8 - tmp11
tmp14 = tmp13 * tmp1
tmp15 = 1 + -1 * x0
tmp16 = tmp15 <= tmp4
tmp17 = tl.where(tmp16, tmp1, tmp6)
tmp18 = tmp14 * tmp17
tmp19 = tmp1 - tmp17
tmp20 = tmp19 * tmp10
tmp21 = tmp18 - tmp20
tmp22 = triton_helpers.maximum(tmp12, tmp21)
tmp24 = tmp23 * tmp1
tmp25 = 2 + -1 * x0
tmp26 = tmp25 <= tmp4
tmp27 = tl.where(tmp26, tmp1, tmp6)
tmp28 = tmp24 * tmp27
tmp29 = tmp1 - tmp27
tmp30 = tmp29 * tmp10
tmp31 = tmp28 - tmp30
tmp32 = triton_helpers.maximum(tmp22, tmp31)
tmp34 = tmp33 * tmp1
tmp35 = 3 + -1 * x0
tmp36 = tmp35 <= tmp4
tmp37 = tl.where(tmp36, tmp1, tmp6)
tmp38 = tmp34 * tmp37
tmp39 = tmp1 - tmp37
tmp40 = tmp39 * tmp10
tmp41 = tmp38 - tmp40
tmp42 = triton_helpers.maximum(tmp32, tmp41)
tmp43 = tmp12 - tmp42
tmp44 = tl_math.exp(tmp43)
tmp45 = tmp21 - tmp42
tmp46 = tl_math.exp(tmp45)
tmp47 = tmp44 + tmp46
tmp48 = tmp31 - tmp42
tmp49 = tl_math.exp(tmp48)
tmp50 = tmp47 + tmp49
tmp51 = tmp41 - tmp42
tmp52 = tl_math.exp(tmp51)
tmp53 = tmp50 + tmp52
tl.store(out_ptr0 + x2, tmp42, xmask)
tl.store(out_ptr1 + x2, tmp53, xmask)
@triton.jit
def triton_poi_fused__softmax_div_mul_rsub_sub_7(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x1 = xindex // 4 % 4
x4 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp13 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = x0 + -1 * x1
tmp4 = tl.full([1], 0, tl.int64)
tmp5 = tmp3 <= tmp4
tmp6 = 0.0
tmp7 = tl.where(tmp5, tmp1, tmp6)
tmp8 = tmp2 * tmp7
tmp9 = tmp1 - tmp7
tmp10 = 10000.0
tmp11 = tmp9 * tmp10
tmp12 = tmp8 - tmp11
tmp14 = tmp12 - tmp13
tmp15 = tl_math.exp(tmp14)
tmp17 = tmp15 / tmp16
tl.store(in_out_ptr0 + x3, tmp17, xmask)
@triton.jit
def triton_poi_fused_clone_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (8 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_9(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_10(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + 1)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr2 + 2)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr2 + 3)
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp10 = tmp7 + tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp17 = tmp14 + tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp24 = tmp21 + tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr1 + x0, tmp40, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_11(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 1e-05
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp6 * tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_gelu_12(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
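# A minimal eager sketch of the fused GELU above (standard PyTorch only):
# 0.5 * x * (1 + erf(x / sqrt(2))), the exact erf-based form used by F.gelu;
# the constant 0.7071067811865476 is 1 / sqrt(2).
import torch
import torch.nn.functional as F
x = torch.linspace(-3.0, 3.0, 13)
y = 0.5 * x * (1.0 + torch.erf(x * 0.7071067811865476))
print(torch.allclose(y, F.gelu(x), atol=1e-6))  # expected: True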
@triton.jit
def triton_poi_fused_add_13(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr0 + x2, xmask)
tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp4 + tmp7
tl.store(in_out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (16, 4), (4, 1))
assert_size_stride(primals_11, (16,), (1,))
assert_size_stride(primals_12, (4, 16), (16, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0,
buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del primals_1
del primals_2
buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 12), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((8, 4, 4, 1), (16, 1, 4, 16), torch.float32)
triton_poi_fused_stack_2[grid(128)](buf3, buf4, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_3[grid(16, 4)](buf3, buf5, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(16, 4)](buf3, buf6, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf6, (16, 1, 4), (4, 0, 1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_tril_5[grid(16)](buf8, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_div_mul_rsub_sub_6[grid(64)](buf7, buf9,
buf10, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf11 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf7
triton_poi_fused__softmax_div_mul_rsub_sub_7[grid(256)](buf11, buf9,
buf10, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf12 = reinterpret_tensor(buf9, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf9
triton_poi_fused_clone_8[grid(16, 4)](buf3, buf12, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
del buf3
buf13 = reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 1), 0)
del buf10
extern_kernels.bmm(reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf12, (16, 4, 1), (4, 1, 0), 0), out=buf13)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_9[grid(16, 4)](buf13, buf14, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf15 = reinterpret_tensor(buf13, (16, 4), (4, 1), 0)
del buf13
extern_kernels.mm(reinterpret_tensor(buf14, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf15)
buf16 = buf1
del buf1
buf17 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_10[grid(16)](primals_3,
buf15, primals_7, buf16, buf17, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_11[grid(64)](primals_3,
buf15, primals_7, buf16, buf17, primals_8, primals_9, buf18, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf16
del buf17
del primals_9
buf19 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf18, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_10, (4, 16), (1, 4), 0),
alpha=1, beta=1, out=buf19)
del primals_11
buf20 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_poi_fused_gelu_12[grid(256)](buf19, buf20, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf21 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf20, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_12, (16, 4), (1, 16), 0), out=buf21)
buf22 = reinterpret_tensor(buf21, (4, 4, 4), (16, 4, 1), 0)
del buf21
triton_poi_fused_add_13[grid(64)](buf22, primals_3, buf15,
primals_7, primals_13, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_13
return buf22, reinterpret_tensor(buf4, (2, 4, 4, 4, 1), (64, 16, 1, 4,
4), 0), primals_3, primals_7, primals_8, reinterpret_tensor(buf2, (
16, 4), (4, 1), 0), buf8, buf11, reinterpret_tensor(buf14, (16, 4),
(4, 1), 0), buf15, reinterpret_tensor(buf18, (16, 4), (4, 1), 0
), buf19, reinterpret_tensor(buf20, (16, 16), (16, 1), 0
), primals_12, primals_10, primals_6, reinterpret_tensor(buf12, (16,
1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 4), 0), primals_4
class MLP(nn.Module):
def __init__(self, embedding_size):
super(MLP, self).__init__()
self.dense_h_to_4h = nn.Linear(embedding_size, embedding_size * 4)
self.dense_4h_to_h = nn.Linear(embedding_size * 4, embedding_size)
self.act = nn.functional.gelu
def forward(self, x):
h = self.act(self.dense_h_to_4h(x))
h2 = self.dense_4h_to_h(h)
return h2
class Attention(nn.Module):
def __init__(self, embedding_size, num_attention_heads,
attention_dropout, residual_dropout):
super(Attention, self).__init__()
self.num_attention_heads = num_attention_heads
self.size_per_head = embedding_size // num_attention_heads
self.embedding_size = embedding_size
self.query_key_value = nn.Linear(embedding_size, embedding_size * 3)
self.attn_drop = nn.Dropout(attention_dropout)
self.resid_drop = nn.Dropout(residual_dropout)
self.dense = nn.Linear(embedding_size, embedding_size)
def split_heads(self, x):
x = x.reshape([-1, self.seq_len, self.num_attention_heads, self.
size_per_head])
return x.permute(0, 2, 1, 3)
def forward(self, x, kv_cache=None):
self.seq_len = x.shape[1]
x = self.query_key_value(x)
q, k, v = torch.split(x, split_size_or_sections=self.embedding_size,
dim=2)
q = self.split_heads(q)
k = self.split_heads(k)
v = self.split_heads(v)
if kv_cache is not None:
pk, pv = kv_cache[0], kv_cache[1]
k = torch.cat([pk, k], dim=-2)
v = torch.cat([pv, v], dim=-2)
cached_kv = torch.stack([k, v])
attn = torch.matmul(q, k.transpose(-1, -2))
attn = attn / math.sqrt(self.size_per_head)
attention_mask = torch.tril(torch.ones(self.seq_len, self.seq_len,
dtype=torch.float32, device=x.device))
attention_mask = attention_mask.reshape([1, 1, self.seq_len, self.
seq_len])
attn = attn * attention_mask - 10000.0 * (1.0 - attention_mask)
attn = nn.Softmax(dim=-1)(attn)
attn = self.attn_drop(attn)
y = torch.matmul(attn, v)
y = y.permute(0, 2, 1, 3)
y = torch.reshape(y, [-1, self.seq_len, self.embedding_size])
y = self.resid_drop(self.dense(y))
return y, cached_kv
class BlockNew(nn.Module):
def __init__(self, embedding_size, num_attention_heads,
attention_dropout, residual_dropout):
super(BlockNew, self).__init__()
self.input_layernorm = nn.LayerNorm(embedding_size, eps=1e-05)
self.attention = Attention(embedding_size, num_attention_heads,
attention_dropout, residual_dropout)
self.post_attention_layernorm = nn.LayerNorm(embedding_size, eps=1e-05)
self.mlp = MLP(embedding_size)
def forward(self, input_0):
primals_1 = self.input_layernorm.weight
primals_2 = self.input_layernorm.bias
primals_4 = self.attention.query_key_value.weight
primals_5 = self.attention.query_key_value.bias
primals_6 = self.attention.dense.weight
primals_7 = self.attention.dense.bias
primals_8 = self.post_attention_layernorm.weight
primals_9 = self.post_attention_layernorm.bias
primals_10 = self.mlp.dense_h_to_4h.weight
primals_11 = self.mlp.dense_h_to_4h.bias
primals_12 = self.mlp.dense_4h_to_h.weight
primals_13 = self.mlp.dense_4h_to_h.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0], output[1]
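# Minimal usage sketch, assuming a CUDA device and the classes above in scope;
# the (4, 4, 4) input matches the shapes asserted in call(), and the dropout
# probabilities are illustrative values.
def example_block_usage():
    block = BlockNew(embedding_size=4, num_attention_heads=4,
        attention_dropout=0.5, residual_dropout=0.5).cuda()
    x = torch.rand(4, 4, 4, device='cuda')
    y, kv_cache = block(x)
    print(y.shape, kv_cache.shape)  # (4, 4, 4) output and stacked key/value cache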
|
AeroXi/CPM-Generate-Pytorch
|
Block
| false
| 8,949
|
[
"Apache-2.0"
] | 0
|
a1530ad2848a690c6e1557f996fe58538fe86884
|
https://github.com/AeroXi/CPM-Generate-Pytorch/tree/a1530ad2848a690c6e1557f996fe58538fe86884
|
LayerNorm
|
import torch
import torch.nn as nn
import torch.utils.data
class LayerNorm(nn.Module):
"""
Simple 1D LayerNorm.
"""
def __init__(self, features, center=True, scale=False, eps=1e-06):
super().__init__()
self.center = center
self.scale = scale
self.eps = eps
if self.scale:
self.scale_param = nn.Parameter(torch.ones(features))
else:
self.scale_param = None
if self.center:
self.center_param = nn.Parameter(torch.zeros(features))
else:
self.center_param = None
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
output = (x - mean) / (std + self.eps)
if self.scale:
output = output * self.scale_param
if self.center:
output = output + self.center_param
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'features': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mean_std_sub_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp1 - tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp2 - tmp9
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = tmp4 - tmp9
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp6 - tmp9
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = 3.0
tmp23 = tmp21 / tmp22
tmp24 = libdevice.sqrt(tmp23)
tmp25 = 1e-06
tmp26 = tmp24 + tmp25
tmp27 = tmp10 / tmp26
tmp29 = tmp27 + tmp28
tl.store(out_ptr0 + x2, tmp29, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_std_sub_0[grid(256)](primals_1,
primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf0,
class LayerNormNew(nn.Module):
"""
Simple 1D LayerNorm.
"""
def __init__(self, features, center=True, scale=False, eps=1e-06):
super().__init__()
self.center = center
self.scale = scale
self.eps = eps
if self.scale:
self.scale_param = nn.Parameter(torch.ones(features))
else:
self.scale_param = None
if self.center:
self.center_param = nn.Parameter(torch.zeros(features))
else:
self.center_param = None
def forward(self, input_0):
primals_2 = self.center_param
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
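# Equivalence sketch, assuming a CUDA device and that the eager LayerNorm from
# this entry is in scope; both modules start with a zero center parameter, so
# no state copying should be needed for the comparison.
def check_layernorm_equivalence():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = LayerNorm(4).cuda()
    opt = LayerNormNew(4).cuda()
    print(torch.allclose(ref(x), opt(x), atol=1e-5))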
|
Altriaex/d4rl_evaluations
|
LayerNorm
| false
| 8,950
|
[
"Apache-2.0"
] | 0
|
ceb34c04e98af9332c6338a1414c0c2aa5fea68b
|
https://github.com/Altriaex/d4rl_evaluations/tree/ceb34c04e98af9332c6338a1414c0c2aa5fea68b
|
softCrossEntropy
|
import torch
from torch import nn
import torch.nn.functional as fcnal
class softCrossEntropy(torch.nn.Module):
def __init__(self, alpha=0.95):
"""
:param alpha: Strength (0-1) of influence from soft labels in training
"""
super(softCrossEntropy, self).__init__()
self.alpha = alpha
return
def forward(self, inputs, target, true_labels):
"""
:param inputs: predictions
:param target: target (soft) labels
:param true_labels: true (hard) labels
:return: loss
"""
KD_loss = self.alpha
KD_loss *= nn.KLDivLoss(size_average=False)(fcnal.log_softmax(
inputs, dim=1), fcnal.softmax(target, dim=1))
KD_loss += (1 - self.alpha) * fcnal.cross_entropy(inputs, true_labels)
return KD_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
tl.store(out_ptr1 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax__softmax_add_div_mul_neg_sub_sum_xlogy_2(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr1 + r3, None)
tmp18 = tl.load(in_ptr1 + (r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr1 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr1 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp26 = tl.load(in_ptr1 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp36 = tl.load(in_ptr2 + r3, None)
tmp37 = tl.load(in_ptr2 + (r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp39 = tl.load(in_ptr2 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp42 = tl.load(in_ptr2 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp45 = tl.load(in_ptr2 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp50 = tl.load(in_ptr3 + r3, None)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = libdevice.isnan(tmp8).to(tl.int1)
tmp10 = 0.0
tmp11 = tmp8 == tmp10
tmp12 = tl_math.log(tmp8)
tmp13 = tmp8 * tmp12
tmp14 = tl.where(tmp11, tmp10, tmp13)
tmp15 = float('nan')
tmp16 = tl.where(tmp9, tmp15, tmp14)
tmp19 = tl_math.exp(tmp18)
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tl_math.log(tmp28)
tmp30 = tmp17 - tmp29
tmp31 = tmp8 * tmp30
tmp32 = tmp16 - tmp31
tmp33 = tl.broadcast_to(tmp32, [RBLOCK])
tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0))
tmp38 = tl_math.exp(tmp37)
tmp40 = tl_math.exp(tmp39)
tmp41 = tmp38 + tmp40
tmp43 = tl_math.exp(tmp42)
tmp44 = tmp41 + tmp43
tmp46 = tl_math.exp(tmp45)
tmp47 = tmp44 + tmp46
tmp48 = tl_math.log(tmp47)
tmp49 = tmp36 - tmp48
tmp51 = tmp49 * tmp50
tmp52 = tl.broadcast_to(tmp51, [RBLOCK])
tmp54 = triton_helpers.promote_to_tensor(tl.sum(tmp52, 0))
tmp55 = 0.95
tmp56 = tmp35 * tmp55
tmp57 = -tmp54
tmp58 = 0.015625
tmp59 = tmp57 * tmp58
tmp60 = 0.050000000000000044
tmp61 = tmp59 * tmp60
tmp62 = tmp56 + tmp61
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp62, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_1[grid(256)](arg0_1, buf2, buf4, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf6 = buf3
del buf3
triton_per_fused__log_softmax__softmax_add_div_mul_neg_sub_sum_xlogy_2[
grid(1)](buf6, buf0, buf2, buf4, arg2_1, 1, 256, num_warps=2,
num_stages=1)
del arg2_1
del buf0
del buf2
del buf4
return buf6,
class softCrossEntropyNew(torch.nn.Module):
def __init__(self, alpha=0.95):
"""
:param alpha: Strength (0-1) of influence from soft labels in training
"""
super(softCrossEntropyNew, self).__init__()
self.alpha = alpha
return
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
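# Minimal usage sketch, assuming a CUDA device; the three inputs mirror
# get_inputs() from the eager version of this entry (predictions, soft
# targets, and a same-shaped tensor standing in for the true labels).
def example_soft_cross_entropy():
    crit = softCrossEntropyNew(alpha=0.95)
    preds = torch.rand(4, 4, 4, 4, device='cuda')
    soft_targets = torch.rand(4, 4, 4, 4, device='cuda')
    true_labels = torch.rand(4, 4, 4, 4, device='cuda')
    loss = crit(preds, soft_targets, true_labels)
    print(loss.item())  # scalar loss value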
|
Benjamin-Lee/cyphercat
|
softCrossEntropy
| false
| 8,951
|
[
"Apache-2.0"
] | 0
|
d8df0544337d4e7e14c2463264c008b7811d35b3
|
https://github.com/Benjamin-Lee/cyphercat/tree/d8df0544337d4e7e14c2463264c008b7811d35b3
|
LayerNorm
|
import torch
import torch.nn as nn
class LayerNorm(nn.Module):
"""Construct a layernorm module (See citation for details)."""
def __init__(self, features, eps=1e-06):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'features': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp13 = tmp2 - tmp10
tmp14 = tmp13 * tmp13
tmp15 = tmp3 - tmp10
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp5 - tmp10
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp10
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = 3.0
tmp25 = tmp23 / tmp24
tmp26 = libdevice.sqrt(tmp25)
tmp27 = 1e-06
tmp28 = tmp26 + tmp27
tmp29 = tmp12 / tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x2, tmp31, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_std_sub_0[grid(256)](primals_2,
primals_1, primals_3, buf0, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
del primals_3
return buf0, primals_1
class LayerNormNew(nn.Module):
"""Construct a layernorm module (See citation for details)."""
def __init__(self, features, eps=1e-06):
super(LayerNormNew, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, input_0):
primals_2 = self.a_2
primals_3 = self.b_2
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
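# Formula sketch, assuming a CUDA device: with default parameters the output
# should match the eager expression a_2 * (x - mean) / (std + eps) + b_2.
def check_affine_layernorm():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ln = LayerNormNew(4).cuda()
    mean = x.mean(-1, keepdim=True)
    std = x.std(-1, keepdim=True)
    expected = ln.a_2 * (x - mean) / (std + ln.eps) + ln.b_2
    print(torch.allclose(ln(x), expected, atol=1e-5))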
|
BruceWen120/neurips-reproducibility-challenge-2019
|
LayerNorm
| false
| 8,952
|
[
"Apache-2.0"
] | 0
|
b0635aefe83e3f895ce0991913824e861bb7d02d
|
https://github.com/BruceWen120/neurips-reproducibility-challenge-2019/tree/b0635aefe83e3f895ce0991913824e861bb7d02d
|
Value
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class Value(nn.Module):
def __init__(self, state_dim, action_dim):
super(Value, self).__init__()
self.l1 = nn.Linear(state_dim, 400)
self.l2 = nn.Linear(400, 300)
self.l3 = nn.Linear(300, 1)
def forward(self, state):
v = F.relu(self.l1(state))
v = F.relu(self.l2(v))
v = self.l3(v)
return v
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'action_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 400
x2 = xindex % 1600
x3 = xindex // 1600
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 300
x2 = xindex // 1200
x3 = xindex % 1200
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3 + 1216 * x2), tmp4, xmask)
tl.store(out_ptr1 + (x3 + 1280 * x2), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 300
x1 = xindex // 300
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 300 * (x1 % 4) + 1216 * (x1 // 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (400, 4), (4, 1))
assert_size_stride(primals_2, (400,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (300, 400), (400, 1))
assert_size_stride(primals_5, (300,), (1,))
assert_size_stride(primals_6, (1, 300), (300, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 400), (6400, 1600, 400, 1), 0
)
del buf0
buf8 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(25600)](buf1,
primals_2, buf8, 25600, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 400), (400, 1), 0),
reinterpret_tensor(primals_4, (400, 300), (1, 400), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1),
torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(19200)](buf2,
primals_5, buf3, buf7, 19200, XBLOCK=128, num_warps=4, num_stages=1
)
del primals_5
buf4 = buf2
del buf2
triton_poi_fused_relu_view_2[grid(19200)](buf3, buf4, 19200, XBLOCK
=256, num_warps=4, num_stages=1)
del buf3
buf6 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, buf4, reinterpret_tensor(primals_6,
(300, 1), (1, 300), 0), alpha=1, beta=1, out=buf6)
del primals_7
return reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 400), (400, 1), 0
), buf4, primals_6, buf7, primals_4, buf8
class ValueNew(nn.Module):
def __init__(self, state_dim, action_dim):
super(ValueNew, self).__init__()
self.l1 = nn.Linear(state_dim, 400)
self.l2 = nn.Linear(400, 300)
self.l3 = nn.Linear(300, 1)
def forward(self, input_0):
primals_1 = self.l1.weight
primals_2 = self.l1.bias
primals_4 = self.l2.weight
primals_5 = self.l2.bias
primals_6 = self.l3.weight
primals_7 = self.l3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
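# Equivalence sketch, assuming a CUDA device and that the eager Value class
# from this entry is in scope; the state dict is copied so both networks use
# the same randomly initialized weights.
def check_value_equivalence():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = Value(4, 4).cuda()
    opt = ValueNew(4, 4).cuda()
    opt.load_state_dict(ref.state_dict())
    print(torch.allclose(ref(x), opt(x), atol=1e-5))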
|
Altriaex/d4rl_evaluations
|
Value
| false
| 8,953
|
[
"Apache-2.0"
] | 0
|
ceb34c04e98af9332c6338a1414c0c2aa5fea68b
|
https://github.com/Altriaex/d4rl_evaluations/tree/ceb34c04e98af9332c6338a1414c0c2aa5fea68b
|
DecoderSlot
|
import torch
from torch import nn
class DecoderSlot(nn.Module):
def __init__(self):
super().__init__()
self.conv_1 = nn.ConvTranspose2d(in_channels=66, out_channels=64,
kernel_size=5, stride=(2, 2))
self.conv_2 = nn.ConvTranspose2d(in_channels=64, out_channels=64,
kernel_size=5, stride=(2, 2))
self.conv_3 = nn.ConvTranspose2d(in_channels=64, out_channels=64,
kernel_size=5, stride=(2, 2))
self.conv_4 = nn.ConvTranspose2d(in_channels=64, out_channels=64,
kernel_size=5, stride=(2, 2))
self.conv_5 = nn.ConvTranspose2d(in_channels=64, out_channels=64,
kernel_size=2)
self.conv_6 = nn.ConvTranspose2d(in_channels=64, out_channels=4,
kernel_size=2)
def forward(self, inputs):
out = self.conv_1(inputs)
out = self.conv_2(out)
out = self.conv_3(out)
out = self.conv_4(out)
out = self.conv_5(out)
out = self.conv_6(out)
return out
def get_inputs():
return [torch.rand([4, 66, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4224
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 264
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 66
y1 = yindex // 66
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 66 * x2 + 1056 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 256 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 256
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 4 * x2 + 16 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 30976
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 160000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 719104
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 3041536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 3097600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_10(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 12321
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 49284 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 12321 * y3), tmp2, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (66, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 66, 4, 4), (1056, 16, 4, 1))
assert_size_stride(primals_4, (64, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (64, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (64, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (64, 64, 2, 2), (256, 4, 2, 1))
assert_size_stride(primals_11, (64,), (1,))
assert_size_stride(primals_12, (64, 4, 2, 2), (16, 4, 2, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((66, 64, 5, 5), (1600, 1, 320, 64), torch
.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(4224, 25)](primals_1, buf0, 4224, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 66, 4, 4), (1056, 1, 264, 66), torch.
float32)
triton_poi_fused_1[grid(264, 16)](primals_3, buf1, 264, 16, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 64, 5, 5), (1600, 1, 320, 64), torch
.float32)
triton_poi_fused_2[grid(4096, 25)](primals_4, buf2, 4096, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((64, 64, 5, 5), (1600, 1, 320, 64), torch
.float32)
triton_poi_fused_2[grid(4096, 25)](primals_6, buf3, 4096, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((64, 64, 5, 5), (1600, 1, 320, 64), torch
.float32)
triton_poi_fused_2[grid(4096, 25)](primals_8, buf4, 4096, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_8
buf5 = empty_strided_cuda((64, 64, 2, 2), (256, 1, 128, 64), torch.
float32)
triton_poi_fused_3[grid(4096, 4)](primals_10, buf5, 4096, 4, XBLOCK
=4, YBLOCK=256, num_warps=4, num_stages=1)
del primals_10
buf6 = empty_strided_cuda((64, 4, 2, 2), (16, 1, 8, 4), torch.float32)
triton_poi_fused_4[grid(256, 4)](primals_12, buf6, 256, 4, XBLOCK=4,
YBLOCK=256, num_warps=4, num_stages=1)
del primals_12
buf7 = extern_kernels.convolution(buf1, buf0, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 64, 11, 11), (7744, 1, 704, 64))
buf8 = buf7
del buf7
triton_poi_fused_convolution_5[grid(30976)](buf8, primals_2, 30976,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf9 = extern_kernels.convolution(buf8, buf2, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 64, 25, 25), (40000, 1, 1600, 64))
buf10 = buf9
del buf9
triton_poi_fused_convolution_6[grid(160000)](buf10, primals_5,
160000, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf11 = extern_kernels.convolution(buf10, buf3, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 64, 53, 53), (179776, 1, 3392, 64))
buf12 = buf11
del buf11
triton_poi_fused_convolution_7[grid(719104)](buf12, primals_7,
719104, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf13 = extern_kernels.convolution(buf12, buf4, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 64, 109, 109), (760384, 1, 6976, 64))
buf14 = buf13
del buf13
triton_poi_fused_convolution_8[grid(3041536)](buf14, primals_9,
3041536, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
buf15 = extern_kernels.convolution(buf14, buf5, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 64, 110, 110), (774400, 1, 7040, 64))
buf16 = buf15
del buf15
triton_poi_fused_convolution_9[grid(3097600)](buf16, primals_11,
3097600, XBLOCK=512, num_warps=8, num_stages=1)
del primals_11
buf17 = extern_kernels.convolution(buf16, buf6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 4, 111, 111), (49284, 1, 444, 4))
buf18 = empty_strided_cuda((4, 4, 111, 111), (49284, 12321, 111, 1),
torch.float32)
triton_poi_fused_convolution_10[grid(16, 12321)](buf17, primals_13,
buf18, 16, 12321, XBLOCK=1024, YBLOCK=1, num_warps=4, num_stages=1)
del buf17
del primals_13
return (buf18, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf8, buf10,
buf12, buf14, buf16)
class DecoderSlotNew(nn.Module):
def __init__(self):
super().__init__()
self.conv_1 = nn.ConvTranspose2d(in_channels=66, out_channels=64,
kernel_size=5, stride=(2, 2))
self.conv_2 = nn.ConvTranspose2d(in_channels=64, out_channels=64,
kernel_size=5, stride=(2, 2))
self.conv_3 = nn.ConvTranspose2d(in_channels=64, out_channels=64,
kernel_size=5, stride=(2, 2))
self.conv_4 = nn.ConvTranspose2d(in_channels=64, out_channels=64,
kernel_size=5, stride=(2, 2))
self.conv_5 = nn.ConvTranspose2d(in_channels=64, out_channels=64,
kernel_size=2)
self.conv_6 = nn.ConvTranspose2d(in_channels=64, out_channels=4,
kernel_size=2)
def forward(self, input_0):
primals_1 = self.conv_1.weight
primals_2 = self.conv_1.bias
primals_4 = self.conv_2.weight
primals_5 = self.conv_2.bias
primals_6 = self.conv_3.weight
primals_7 = self.conv_3.bias
primals_8 = self.conv_4.weight
primals_9 = self.conv_4.bias
primals_10 = self.conv_5.weight
primals_11 = self.conv_5.bias
primals_12 = self.conv_6.weight
primals_13 = self.conv_6.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
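# Minimal usage sketch, assuming a CUDA device; with the (4, 66, 4, 4) input
# from get_inputs() the stacked transposed convolutions produce a
# (4, 4, 111, 111) output, matching the shape asserted in call().
def example_decoder_slot():
    dec = DecoderSlotNew().cuda()
    x = torch.rand(4, 66, 4, 4, device='cuda')
    out = dec(x)
    print(out.shape)  # torch.Size([4, 4, 111, 111])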
|
CatarauCorina/representation_learning
|
DecoderSlot
| false
| 8,954
|
[
"Apache-2.0"
] | 0
|
bb467761b03e5d8ac20c2f705f3bfdb84a7c3842
|
https://github.com/CatarauCorina/representation_learning/tree/bb467761b03e5d8ac20c2f705f3bfdb84a7c3842
|
Classifier
|
import torch
import torch.nn as nn
class Classifier(nn.Module):
def __init__(self, latent_size, output_size):
super().__init__()
self.fc1 = nn.Linear(latent_size, 100)
self.relu1 = nn.LeakyReLU(0.2)
self.fc2 = nn.Linear(100, 50)
self.relu2 = nn.LeakyReLU(0.2)
self.fc3 = nn.Linear(50, output_size)
self.sigmoid = nn.Sigmoid()
def forward(self, input):
out = self.fc1(input)
out = self.relu1(out)
out = self.fc2(out)
out = self.relu2(out)
out = self.fc3(out)
out = self.sigmoid(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'latent_size': 4, 'output_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 100
x2 = xindex // 1600
x4 = xindex % 1600
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x4 + 1664 * x2), tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 3200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 50
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (100, 4), (4, 1))
assert_size_stride(primals_2, (100,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (50, 100), (100, 1))
assert_size_stride(primals_5, (50,), (1,))
assert_size_stride(primals_6, (4, 50), (50, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 100), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1),
torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 100), (1600, 400, 100, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(6400)](buf0, primals_2, buf1,
buf2, 6400, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
buf3 = empty_strided_cuda((64, 50), (50, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 100), (100, 1), 0),
reinterpret_tensor(primals_4, (100, 50), (1, 100), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 50), (800, 200, 50, 1), torch.bool)
buf5 = empty_strided_cuda((4, 4, 4, 50), (800, 200, 50, 1), torch.
float32)
triton_poi_fused_leaky_relu_1[grid(3200)](buf3, primals_5, buf4,
buf5, 3200, XBLOCK=256, num_warps=4, num_stages=1)
del buf3
del primals_5
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (64, 50), (50, 1), 0),
reinterpret_tensor(primals_6, (50, 4), (1, 50), 0), out=buf6)
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
triton_poi_fused_sigmoid_2[grid(256)](buf7, primals_7, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_7
return buf7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, reinterpret_tensor(buf2, (64, 100), (100, 1), 0
), buf4, reinterpret_tensor(buf5, (64, 50), (50, 1), 0
), buf7, primals_6, primals_4
class ClassifierNew(nn.Module):
def __init__(self, latent_size, output_size):
super().__init__()
self.fc1 = nn.Linear(latent_size, 100)
self.relu1 = nn.LeakyReLU(0.2)
self.fc2 = nn.Linear(100, 50)
self.relu2 = nn.LeakyReLU(0.2)
self.fc3 = nn.Linear(50, output_size)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
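# Minimal usage sketch, assuming a CUDA device; the final sigmoid keeps every
# output in (0, 1) and the last dimension equals output_size.
def example_classifier():
    clf = ClassifierNew(latent_size=4, output_size=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out = clf(x)
    print(out.shape, out.min().item() >= 0.0, out.max().item() <= 1.0)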
|
BruceWen120/neurips-reproducibility-challenge-2019
|
Classifier
| false
| 8,955
|
[
"Apache-2.0"
] | 0
|
b0635aefe83e3f895ce0991913824e861bb7d02d
|
https://github.com/BruceWen120/neurips-reproducibility-challenge-2019/tree/b0635aefe83e3f895ce0991913824e861bb7d02d
|
Critic
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class Critic(nn.Module):
def __init__(self, state_dim, action_dim):
super(Critic, self).__init__()
self.l1 = nn.Linear(state_dim + action_dim, 400)
self.l2 = nn.Linear(400, 300)
self.l3 = nn.Linear(300, 1)
def forward(self, state, action):
q = F.relu(self.l1(torch.cat([state, action], 1)))
q = F.relu(self.l2(q))
q = self.l3(q)
return q
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'action_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 400
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 300
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (400, 8), (8, 1))
assert_size_stride(primals_4, (400,), (1,))
assert_size_stride(primals_5, (300, 400), (400, 1))
assert_size_stride(primals_6, (300,), (1,))
assert_size_stride(primals_7, (1, 300), (300, 1))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 400), (400, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 400), (1,
8), 0), out=buf1)
del primals_3
buf2 = buf1
del buf1
triton_poi_fused_relu_1[grid(1600)](buf2, primals_4, 1600, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 300), (300, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (400, 300), (
1, 400), 0), out=buf3)
buf4 = buf3
del buf3
triton_poi_fused_relu_2[grid(1200)](buf4, primals_6, 1200, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_6
buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7,
(300, 1), (1, 300), 0), alpha=1, beta=1, out=buf6)
del primals_8
return buf6, buf0, buf2, buf4, primals_7, primals_5
class CriticNew(nn.Module):
def __init__(self, state_dim, action_dim):
super(CriticNew, self).__init__()
self.l1 = nn.Linear(state_dim + action_dim, 400)
self.l2 = nn.Linear(400, 300)
self.l3 = nn.Linear(300, 1)
def forward(self, input_0, input_1):
primals_3 = self.l1.weight
primals_4 = self.l1.bias
primals_5 = self.l2.weight
primals_6 = self.l2.bias
primals_7 = self.l3.weight
primals_8 = self.l3.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
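# Minimal usage sketch, assuming a CUDA device; state and action batches are
# concatenated along dim=1 inside the fused cat kernel, as in the eager Critic.
def example_critic():
    critic = CriticNew(state_dim=4, action_dim=4).cuda()
    state = torch.rand(4, 4, device='cuda')
    action = torch.rand(4, 4, device='cuda')
    q = critic(state, action)
    print(q.shape)  # torch.Size([4, 1])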
|
Altriaex/d4rl_evaluations
|
Critic
| false
| 8,956
|
[
"Apache-2.0"
] | 0
|
ceb34c04e98af9332c6338a1414c0c2aa5fea68b
|
https://github.com/Altriaex/d4rl_evaluations/tree/ceb34c04e98af9332c6338a1414c0c2aa5fea68b
|
Generator
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Generator(nn.Module):
"""Define standard linear + softmax generation step."""
def __init__(self, d_model, vocab):
super(Generator, self).__init__()
self.proj = nn.Linear(d_model, vocab)
def forward(self, x):
return F.log_softmax(self.proj(x), dim=-1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'vocab': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused__log_softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del buf1
return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2
class GeneratorNew(nn.Module):
"""Define standard linear + softmax generation step."""
def __init__(self, d_model, vocab):
super(GeneratorNew, self).__init__()
self.proj = nn.Linear(d_model, vocab)
def forward(self, input_0):
primals_1 = self.proj.weight
primals_2 = self.proj.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
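# Sanity-check sketch, assuming a CUDA device: the two fused kernels implement
# log_softmax, so exponentiating the output should sum to one over the last dim.
def check_generator():
    gen = GeneratorNew(d_model=4, vocab=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out = gen(x)
    print(out.exp().sum(-1))  # expected to be ~1 everywhere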
|
BruceWen120/neurips-reproducibility-challenge-2019
|
Generator
| false
| 8,957
|
[
"Apache-2.0"
] | 0
|
b0635aefe83e3f895ce0991913824e861bb7d02d
|
https://github.com/BruceWen120/neurips-reproducibility-challenge-2019/tree/b0635aefe83e3f895ce0991913824e861bb7d02d
|
DurationPredictorLoss
|
import torch
import torch.multiprocessing
import torch.nn
import torch.optim
import torch.distributed
class DurationPredictorLoss(torch.nn.Module):
"""Loss function module for duration predictor.
    The loss value is calculated in the log domain to make it Gaussian.
"""
def __init__(self, offset=1.0, reduction='mean'):
"""Initilize duration predictor loss module.
Args:
offset (float, optional): Offset value to avoid nan in log domain.
reduction (str): Reduction type in loss calculation.
"""
super(DurationPredictorLoss, self).__init__()
self.criterion = torch.nn.MSELoss(reduction=reduction)
self.offset = offset
def forward(self, outputs, targets):
"""Calculate forward propagation.
Args:
outputs (Tensor): Batch of prediction durations in log domain (B, T)
targets (LongTensor): Batch of groundtruth durations in linear domain (B, T)
Returns:
Tensor: Mean squared error loss value.
Note:
`outputs` is in log domain but `targets` is in linear domain.
"""
targets = torch.log(targets.float() + self.offset)
loss = self.criterion(outputs, targets)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
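# Illustrative usage sketch (not part of the original module): the criterion
# expects predictions already in the log domain and maps the linear-domain
# targets with log(targets + offset) before taking the MSE.
_crit = DurationPredictorLoss(offset=1.0)
_pred_log = torch.rand(2, 5)                 # predicted durations, log domain
_target = torch.randint(1, 10, (2, 5))       # ground-truth durations, linear domain
_loss = _crit(_pred_log, _target)
_manual = torch.mean((_pred_log - torch.log(_target.float() + 1.0)) ** 2)
assert torch.allclose(_loss, _manual, atol=1e-6)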
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.multiprocessing
import torch.nn
import torch.optim
import torch.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_log_mse_loss_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = tl_math.log(tmp3)
tmp5 = tmp0 - tmp4
tmp6 = tmp5 * tmp5
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = 256.0
tmp11 = tmp9 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp11, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_log_mse_loss_0[grid(1)](buf1, arg1_1, arg0_1,
1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class DurationPredictorLossNew(torch.nn.Module):
"""Loss function module for duration predictor.
    The loss value is calculated in the log domain to make it Gaussian.
"""
def __init__(self, offset=1.0, reduction='mean'):
"""Initilize duration predictor loss module.
Args:
offset (float, optional): Offset value to avoid nan in log domain.
reduction (str): Reduction type in loss calculation.
"""
super(DurationPredictorLossNew, self).__init__()
self.criterion = torch.nn.MSELoss(reduction=reduction)
self.offset = offset
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Cardroid/Muskits | DurationPredictorLoss | false | 8,958 | ["Apache-2.0"] | 0 | 91708bb243bc671e48893a734aee710c356e4bd8 | https://github.com/Cardroid/Muskits/tree/91708bb243bc671e48893a734aee710c356e4bd8 |
CriticNet
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class CriticNet(nn.Module):
def __init__(self, s_dim, a_dim):
super(CriticNet, self).__init__()
self.fcs = nn.Linear(s_dim, 30)
self.fcs.weight.data.normal_(0, 0.1)
self.fca = nn.Linear(a_dim, 30)
self.fca.weight.data.normal_(0, 0.1)
self.out = nn.Linear(30, 1)
self.out.weight.data.normal_(0, 0.1)
def forward(self, s, a):
x = self.fcs(s)
y = self.fca(a)
actions_value = self.out(F.relu(x + y))
return actions_value
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'s_dim': 4, 'a_dim': 4}]
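# Illustrative usage sketch (not part of the original module): the critic embeds
# state and action separately, fuses them with an element-wise add and ReLU, and
# emits a single Q-value per (state, action) pair.
_critic = CriticNet(s_dim=4, a_dim=4)
_s, _a = torch.rand(8, 4), torch.rand(8, 4)
_q = _critic(_s, _a)
assert _q.shape == (8, 1)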
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1920
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 30
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.full([1], 0, tl.int32)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp9 = 0.0
tmp10 = tmp8 <= tmp9
tl.store(in_out_ptr0 + x2, tmp8, xmask)
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (30, 4), (4, 1))
assert_size_stride(primals_2, (30,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (30, 4), (4, 1))
assert_size_stride(primals_5, (30,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (1, 30), (30, 1))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 30), (30, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 30), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 30), (30, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 30), (1, 4), 0), out=buf1)
del primals_4
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 30), (480, 120, 30, 1), 0)
del buf0
buf5 = empty_strided_cuda((4, 4, 4, 30), (480, 120, 30, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_add_relu_threshold_backward_0[grid(1920)](buf2,
primals_2, buf1, primals_5, buf5, 1920, XBLOCK=256, num_warps=4,
num_stages=1)
del buf1
del primals_2
del primals_5
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (64, 30),
(30, 1), 0), reinterpret_tensor(primals_7, (30, 1), (1, 30), 0),
alpha=1, beta=1, out=buf4)
del primals_8
return reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0
), reinterpret_tensor(buf2, (64, 30), (30, 1), 0), primals_7, buf5
class CriticNetNew(nn.Module):
def __init__(self, s_dim, a_dim):
super(CriticNetNew, self).__init__()
self.fcs = nn.Linear(s_dim, 30)
self.fcs.weight.data.normal_(0, 0.1)
self.fca = nn.Linear(a_dim, 30)
self.fca.weight.data.normal_(0, 0.1)
self.out = nn.Linear(30, 1)
self.out.weight.data.normal_(0, 0.1)
def forward(self, input_0, input_1):
primals_1 = self.fcs.weight
primals_2 = self.fcs.bias
primals_4 = self.fca.weight
primals_5 = self.fca.bias
primals_7 = self.out.weight
primals_8 = self.out.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
| CuteWans/sheep-vs-dog | CriticNet | false | 8,959 | ["MIT"] | 0 | 4d1542eaa22fd618976757704e584d2c62db5b21 | https://github.com/CuteWans/sheep-vs-dog/tree/4d1542eaa22fd618976757704e584d2c62db5b21 |
Attention
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
def new_parameter(*size):
out = Parameter(torch.FloatTensor(*size))
torch.nn.init.xavier_normal_(out)
return out
class Attention(nn.Module):
def __init__(self, attention_size):
super(Attention, self).__init__()
self.attention = new_parameter(attention_size, 1)
def forward(self, x_in):
attention_score = torch.matmul(x_in, self.attention).squeeze()
attention_score = F.softmax(attention_score).view(x_in.size(0),
x_in.size(1), 1)
scored_x = x_in * attention_score
condensed_x = torch.sum(scored_x, dim=1)
return condensed_x, attention_score
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'attention_size': 4}]
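# Illustrative usage sketch (not part of the original module): the layer scores
# each timestep, normalizes the scores with a softmax (so they sum to 1 per
# example), and returns the weighted sum over the sequence dimension.
_attn = Attention(attention_size=4)
_x = torch.rand(4, 4, 4)                       # (batch, seq_len, features)
_condensed, _weights = _attn(_x)
assert _condensed.shape == (4, 4)              # (batch, features)
assert torch.allclose(_weights.sum(dim=1).squeeze(-1), torch.ones(4), atol=1e-6)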
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp8 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x2, tmp14, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 1), (1, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(16)](buf0, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf2 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0)
del buf0
triton_poi_fused__softmax_1[grid(16)](buf1, buf2, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf3 = buf1
del buf1
triton_poi_fused_mul_sum_2[grid(16)](primals_2, buf2, buf3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
return buf3, reinterpret_tensor(buf2, (4, 4, 1), (4, 1, 1), 0
), primals_2, buf2
def new_parameter(*size):
out = Parameter(torch.FloatTensor(*size))
torch.nn.init.xavier_normal_(out)
return out
class AttentionNew(nn.Module):
def __init__(self, attention_size):
super(AttentionNew, self).__init__()
self.attention = new_parameter(attention_size, 1)
def forward(self, input_0):
primals_1 = self.attention
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0], output[1]
| Danil328/Comment-classification | Attention | false | 8,960 | ["Apache-2.0"] | 0 | 5b355458d7f1fc28921e0df6257564db3da63201 | https://github.com/Danil328/Comment-classification/tree/5b355458d7f1fc28921e0df6257564db3da63201 |
GeneralizedMeanPooling
|
import torch
import torch.nn as nn
class GeneralizedMeanPooling(nn.Module):
"""Applies a 2D power-average adaptive pooling over an input signal composed of several input planes.
    The function computed is: :math:`f(X) = pow(mean(pow(X, p)), 1/p)`
- At p = infinity, one gets Max Pooling
- At p = 1, one gets Average Pooling
The output is of size H x W, for any input size.
The number of output features is equal to the number of input planes.
Args:
output_size: the target output size of the image of the form H x W.
Can be a tuple (H, W) or a single H for a square image H x H
            H and W can be either an ``int``, or ``None``, which means the size will
be the same as that of the input.
"""
def __init__(self, norm, output_size=1, eps=1e-06):
super(GeneralizedMeanPooling, self).__init__()
assert norm > 0
self.p = float(norm)
self.output_size = output_size
self.eps = eps
def forward(self, x):
x = x.clamp(min=self.eps).pow(self.p)
return torch.nn.functional.adaptive_avg_pool2d(x, self.output_size
).pow(1.0 / self.p)
def __repr__(self):
return self.__class__.__name__ + '(' + str(self.p
) + ', ' + 'output_size=' + str(self.output_size) + ')'
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'norm': 4}]
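# Illustrative sanity sketch (not part of the original module), checking the
# docstring's p = 1 case: with norm=1 the layer reduces to plain adaptive
# average pooling (up to the eps clamp on the inputs).
_x = torch.rand(2, 3, 8, 8)
_gem = GeneralizedMeanPooling(norm=1)
_avg = torch.nn.functional.adaptive_avg_pool2d(_x, 1)
assert torch.allclose(_gem(_x), _avg, atol=1e-5)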
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_clamp_mean_pow_0(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = 1e-06
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = tmp2 * tmp2
tmp4 = tmp3 * tmp3
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp5, 0)
tmp8 = tl.sum(tmp7, 1)[:, None]
tmp9 = 16.0
tmp10 = tmp8 / tmp9
tmp11 = 0.25
tmp12 = libdevice.pow(tmp10, tmp11)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp12, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_clamp_mean_pow_0[grid(16)](buf1, arg0_1, 16, 16,
XBLOCK=8, num_warps=2, num_stages=1)
del arg0_1
return buf1,
class GeneralizedMeanPoolingNew(nn.Module):
"""Applies a 2D power-average adaptive pooling over an input signal composed of several input planes.
    The function computed is: :math:`f(X) = pow(mean(pow(X, p)), 1/p)`
- At p = infinity, one gets Max Pooling
- At p = 1, one gets Average Pooling
The output is of size H x W, for any input size.
The number of output features is equal to the number of input planes.
Args:
output_size: the target output size of the image of the form H x W.
Can be a tuple (H, W) or a single H for a square image H x H
            H and W can be either an ``int``, or ``None``, which means the size will
be the same as that of the input.
"""
def __init__(self, norm, output_size=1, eps=1e-06):
super(GeneralizedMeanPoolingNew, self).__init__()
assert norm > 0
self.p = float(norm)
self.output_size = output_size
self.eps = eps
def __repr__(self):
return self.__class__.__name__ + '(' + str(self.p
) + ', ' + 'output_size=' + str(self.output_size) + ')'
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| AsyaPes/light-reid-master | GeneralizedMeanPooling | false | 8,961 | ["MIT"] | 0 | acb4bdd973cdf3832294d8e42442305ab52014f5 | https://github.com/AsyaPes/light-reid-master/tree/acb4bdd973cdf3832294d8e42442305ab52014f5 |
ActorNet
|
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
class ActorNet(nn.Module):
def __init__(self, s_dim, a_dim):
super(ActorNet, self).__init__()
self.fc1 = nn.Linear(s_dim, 30)
self.fc1.weight.data.normal_(0, 0.1)
self.out = nn.Linear(30, a_dim)
self.out.weight.data.normal_(0, 0.1)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = self.out(x)
x = torch.tanh(x)
actions = (x + 1) * np.pi / 2
return actions
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'s_dim': 4, 'a_dim': 4}]
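# Illustrative usage sketch (not part of the original module): tanh squashes the
# logits to (-1, 1), so (x + 1) * pi / 2 maps every action into the range [0, pi].
_actor = ActorNet(s_dim=4, a_dim=4)
_s = torch.rand(16, 4)
_actions = _actor(_s)
assert _actions.shape == (16, 4)
assert torch.all(_actions >= 0) and torch.all(_actions <= np.pi)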
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1920
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 30
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = 3.141592653589793
tmp5 = tmp3 * tmp4
tmp6 = 0.5
tmp7 = tmp5 * tmp6
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (30, 4), (4, 1))
assert_size_stride(primals_2, (30,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 30), (30, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 30), (30, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 30), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 30), (480, 120, 30, 1), 0)
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 30), (480, 120, 30, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(1920)](buf1,
primals_2, buf4, 1920, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 30),
(30, 1), 0), reinterpret_tensor(primals_4, (30, 4), (1, 30), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_mul_tanh_1[grid(256)](buf2, buf3, 256,
XBLOCK=128, num_warps=4, num_stages=1)
return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 30), (30, 1), 0
), buf2, primals_4, buf4
class ActorNetNew(nn.Module):
def __init__(self, s_dim, a_dim):
super(ActorNetNew, self).__init__()
self.fc1 = nn.Linear(s_dim, 30)
self.fc1.weight.data.normal_(0, 0.1)
self.out = nn.Linear(30, a_dim)
self.out.weight.data.normal_(0, 0.1)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.out.weight
primals_5 = self.out.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| CuteWans/sheep-vs-dog | ActorNet | false | 8,962 | ["MIT"] | 0 | 4d1542eaa22fd618976757704e584d2c62db5b21 | https://github.com/CuteWans/sheep-vs-dog/tree/4d1542eaa22fd618976757704e584d2c62db5b21 |
Clamp
|
import torch
import torch.nn as nn
class Clamp(nn.Module):
def __init__(self, min, max):
super(Clamp, self).__init__()
self.min = min
self.max = max
def forward(self, x):
return torch.clamp(x, min=self.min, max=self.max)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'min': 4, 'max': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 4.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = triton_helpers.minimum(tmp2, tmp1)
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ClampNew(nn.Module):
def __init__(self, min, max):
super(ClampNew, self).__init__()
self.min = min
self.max = max
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| AsyaPes/light-reid-master | Clamp | false | 8,963 | ["MIT"] | 0 | acb4bdd973cdf3832294d8e42442305ab52014f5 | https://github.com/AsyaPes/light-reid-master/tree/acb4bdd973cdf3832294d8e42442305ab52014f5 |
CoralLayer
|
import torch
import torch.nn as nn
class CoralLayer(nn.Module):
"""Implements CORAL layer
Parameters
-----------
size_in : int
Number of input features for the inputs to the forward method, which
are expected to have shape=(num_examples, num_features).
num_classes : int
Number of classes in the dataset.
preinit_bias : bool (default=True)
        If true, it will pre-initialize the biases to descending values in
        the [0, 1] range instead of initializing them to all zeros. This pre-
initialization scheme results in faster learning and better
generalization performance in practice.
"""
def __init__(self, size_in, num_classes, preinit_bias=True):
super().__init__()
self.size_in, self.size_out = size_in, 1
self.coral_weights = torch.nn.Linear(self.size_in, 1, bias=False)
if preinit_bias:
self.coral_bias = torch.nn.Parameter(torch.arange(num_classes -
1, 0, -1).float() / (num_classes - 1))
else:
self.coral_bias = torch.nn.Parameter(torch.zeros(num_classes -
1).float())
def forward(self, x):
"""
Computes forward pass.
Parameters
-----------
x : torch.tensor, shape=(num_examples, num_features)
Input features.
Returns
-----------
logits : torch.tensor, shape=(num_examples, num_classes-1)
"""
return self.coral_weights(x) + self.coral_bias
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'size_in': 4, 'num_classes': 4}]
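# Illustrative usage sketch (not part of the original module): all K-1 logits
# share a single weight vector and differ only by the bias terms, which
# preinit_bias initializes to descending values in (0, 1].
_layer = CoralLayer(size_in=4, num_classes=4)
assert torch.allclose(_layer.coral_bias, torch.tensor([3.0, 2.0, 1.0]) / 3)
_logits = _layer(torch.rand(8, 4))
assert _logits.shape == (8, 3)                 # (num_examples, num_classes - 1)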
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 3
x0 = xindex % 3
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 3), (48, 12, 3, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(192)](buf0, primals_3, buf1, 192,
XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_3
return buf1, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0)
class CoralLayerNew(nn.Module):
"""Implements CORAL layer
Parameters
-----------
size_in : int
Number of input features for the inputs to the forward method, which
are expected to have shape=(num_examples, num_features).
num_classes : int
Number of classes in the dataset.
preinit_bias : bool (default=True)
        If true, it will pre-initialize the biases to descending values in
        the [0, 1] range instead of initializing them to all zeros. This pre-
initialization scheme results in faster learning and better
generalization performance in practice.
"""
def __init__(self, size_in, num_classes, preinit_bias=True):
super().__init__()
self.size_in, self.size_out = size_in, 1
self.coral_weights = torch.nn.Linear(self.size_in, 1, bias=False)
if preinit_bias:
self.coral_bias = torch.nn.Parameter(torch.arange(num_classes -
1, 0, -1).float() / (num_classes - 1))
else:
self.coral_bias = torch.nn.Parameter(torch.zeros(num_classes -
1).float())
def forward(self, input_0):
primals_3 = self.coral_bias
primals_1 = self.coral_weights.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| Dineswar11/dretino | CoralLayer | false | 8,964 | ["MIT"] | 0 | f6b1e1043a62f88b1853df1bfaada296710223f7 | https://github.com/Dineswar11/dretino/tree/f6b1e1043a62f88b1853df1bfaada296710223f7 |
Actor
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class Actor(nn.Module):
def __init__(self, state_dim, action_dim, max_action):
super(Actor, self).__init__()
self.l1 = nn.Linear(state_dim + action_dim, 400)
self.l2 = nn.Linear(400, 300)
self.l3 = nn.Linear(300, action_dim)
self.max_action = max_action
def forward(self, state, action):
a = F.relu(self.l1(torch.cat([state, action], 1)))
a = F.relu(self.l2(a))
a = 0.05 * self.max_action * torch.tanh(self.l3(a))
return (a + action).clamp(-self.max_action, self.max_action)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'action_dim': 4, 'max_action': 4}]
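# Illustrative usage sketch (not part of the original module): the network acts
# as a bounded perturbation on the given action; the scaled tanh shifts it by at
# most 0.05 * max_action and the result is clamped back to the action bounds.
_actor = Actor(state_dim=4, action_dim=4, max_action=4)
_state, _action = torch.rand(4, 4), torch.rand(4, 4)
_out = _actor(_state, _action)
assert _out.shape == (4, 4)
assert torch.all(_out.abs() <= 4)
assert torch.all((_out - _action).abs() <= 0.05 * 4 + 1e-6)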
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 400
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 300
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_clamp_ge_le_logical_and_mul_tanh_3(in_ptr0,
in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tmp2 = 0.2
tmp3 = tmp1 * tmp2
tmp5 = tmp3 + tmp4
tmp6 = -4.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = 4.0
tmp9 = triton_helpers.minimum(tmp7, tmp8)
tmp10 = tmp5 >= tmp6
tmp11 = tmp5 <= tmp8
tmp12 = tmp10 & tmp11
tl.store(out_ptr0 + x0, tmp9, xmask)
tl.store(out_ptr1 + x0, tmp12, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (400, 8), (8, 1))
assert_size_stride(primals_4, (400,), (1,))
assert_size_stride(primals_5, (300, 400), (400, 1))
assert_size_stride(primals_6, (300,), (1,))
assert_size_stride(primals_7, (4, 300), (300, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 400), (400, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 400), (1,
8), 0), out=buf1)
del primals_3
buf2 = buf1
del buf1
triton_poi_fused_relu_1[grid(1600)](buf2, primals_4, 1600, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 300), (300, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (400, 300), (
1, 400), 0), out=buf3)
buf4 = buf3
del buf3
triton_poi_fused_relu_2[grid(1200)](buf4, primals_6, 1200, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_6
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7,
(300, 4), (1, 300), 0), alpha=1, beta=1, out=buf5)
del primals_8
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_add_clamp_ge_le_logical_and_mul_tanh_3[grid(16)](buf5,
primals_2, buf6, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf6, buf0, buf2, buf4, buf5, buf7, primals_7, primals_5
class ActorNew(nn.Module):
def __init__(self, state_dim, action_dim, max_action):
super(ActorNew, self).__init__()
self.l1 = nn.Linear(state_dim + action_dim, 400)
self.l2 = nn.Linear(400, 300)
self.l3 = nn.Linear(300, action_dim)
self.max_action = max_action
def forward(self, input_0, input_1):
primals_3 = self.l1.weight
primals_4 = self.l1.bias
primals_5 = self.l2.weight
primals_6 = self.l2.bias
primals_7 = self.l3.weight
primals_8 = self.l3.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
| Altriaex/d4rl_evaluations | Actor | false | 8,965 | ["Apache-2.0"] | 0 | ceb34c04e98af9332c6338a1414c0c2aa5fea68b | https://github.com/Altriaex/d4rl_evaluations/tree/ceb34c04e98af9332c6338a1414c0c2aa5fea68b |
Scale
|
import torch
import torch.nn as nn
class Scale(nn.Module):
def __init__(self, scale=1.0):
super(Scale, self).__init__()
self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float))
def forward(self, x):
return x * self.scale
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](primals_2, primals_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
return buf0, primals_2
class ScaleNew(nn.Module):
def __init__(self, scale=1.0):
super(ScaleNew, self).__init__()
self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float))
def forward(self, input_0):
primals_1 = self.scale
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| Cynicsss/mmdetection | Scale | false | 8,966 | ["Apache-2.0"] | 0 | 89e207fc8c8a7ae3663a5cda53d77b2b94cd1ec8 | https://github.com/Cynicsss/mmdetection/tree/89e207fc8c8a7ae3663a5cda53d77b2b94cd1ec8 |
Conv1dLinear
|
import torch
import torch.multiprocessing
import torch.nn
import torch.optim
import torch.distributed
class Conv1dLinear(torch.nn.Module):
"""Conv1D + Linear for Transformer block.
    A variant of MultiLayeredConv1d that replaces the second conv layer with a linear layer.
"""
def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate):
"""Initialize Conv1dLinear module.
Args:
in_chans (int): Number of input channels.
hidden_chans (int): Number of hidden channels.
kernel_size (int): Kernel size of conv1d.
dropout_rate (float): Dropout rate.
"""
super(Conv1dLinear, self).__init__()
self.w_1 = torch.nn.Conv1d(in_chans, hidden_chans, kernel_size,
stride=1, padding=(kernel_size - 1) // 2)
self.w_2 = torch.nn.Linear(hidden_chans, in_chans)
self.dropout = torch.nn.Dropout(dropout_rate)
def forward(self, x):
"""Calculate forward propagation.
Args:
x (torch.Tensor): Batch of input tensors (B, T, in_chans).
Returns:
            torch.Tensor: Batch of output tensors (B, T, in_chans).
"""
x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)
return self.w_2(self.dropout(x))
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_chans': 4, 'hidden_chans': 4, 'kernel_size': 4,
'dropout_rate': 0.5}]
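# Illustrative shape sketch (not part of the original module): the output feature
# size is in_chans (w_2 maps hidden_chans back to in_chans), and with an even
# kernel_size the (kernel_size - 1) // 2 padding shortens the time axis by one,
# so the get_inputs() tensors above give (4, 3, 4) rather than (4, 4, 4).
_m = Conv1dLinear(in_chans=4, hidden_chans=4, kernel_size=4, dropout_rate=0.0)
_y = _m(torch.rand(4, 4, 4))
assert _y.shape == (4, 3, 4)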
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.multiprocessing
import torch.nn
import torch.optim
import torch.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 12
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 3
y1 = yindex // 3
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 3 * x2 + 12 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_3(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(1,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 3), (12, 3, 1))
del buf0
buf2 = empty_strided_cuda((4, 3, 4), (12, 4, 1), torch.float32)
triton_poi_fused_clone_1[grid(12, 4)](buf1, primals_3, buf2, 12, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((12, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (12, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 3, 4), (12, 4, 1), 0)
del buf3
triton_poi_fused_add_2[grid(48)](buf4, primals_5, 48, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((4, 4, 3), (12, 3, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_3[grid(48)](buf1,
primals_3, buf5, 48, XBLOCK=64, num_warps=1, num_stages=1)
del buf1
del primals_3
return buf4, primals_2, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1,
4), 0), reinterpret_tensor(buf2, (12, 4), (4, 1), 0), primals_4, buf5
class Conv1dLinearNew(torch.nn.Module):
"""Conv1D + Linear for Transformer block.
    A variant of MultiLayeredConv1d that replaces the second conv layer with a linear layer.
"""
def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate):
"""Initialize Conv1dLinear module.
Args:
in_chans (int): Number of input channels.
hidden_chans (int): Number of hidden channels.
kernel_size (int): Kernel size of conv1d.
dropout_rate (float): Dropout rate.
"""
super(Conv1dLinearNew, self).__init__()
self.w_1 = torch.nn.Conv1d(in_chans, hidden_chans, kernel_size,
stride=1, padding=(kernel_size - 1) // 2)
self.w_2 = torch.nn.Linear(hidden_chans, in_chans)
self.dropout = torch.nn.Dropout(dropout_rate)
def forward(self, input_0):
primals_1 = self.w_1.weight
primals_3 = self.w_1.bias
primals_4 = self.w_2.weight
primals_5 = self.w_2.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| Cardroid/Muskits | Conv1dLinear | false | 8,967 | ["Apache-2.0"] | 0 | 91708bb243bc671e48893a734aee710c356e4bd8 | https://github.com/Cardroid/Muskits/tree/91708bb243bc671e48893a734aee710c356e4bd8 |
EncoderLayer
|
import torch
import torch.nn as nn
class FeedForwardNetwork(nn.Module):
def __init__(self, hidden_size, ffn_size, dropout_rate):
super(FeedForwardNetwork, self).__init__()
self.layer1 = nn.Linear(hidden_size, ffn_size)
self.gelu = nn.GELU()
self.layer2 = nn.Linear(ffn_size, hidden_size)
def forward(self, x):
x = self.layer1(x)
x = self.gelu(x)
x = self.layer2(x)
return x
class MultiHeadAttention(nn.Module):
def __init__(self, hidden_size, attention_dropout_rate, num_heads):
super(MultiHeadAttention, self).__init__()
self.num_heads = num_heads
self.att_size = att_size = hidden_size // num_heads
self.scale = att_size ** -0.5
self.linear_q = nn.Linear(hidden_size, num_heads * att_size)
self.linear_k = nn.Linear(hidden_size, num_heads * att_size)
self.linear_v = nn.Linear(hidden_size, num_heads * att_size)
self.att_dropout = nn.Dropout(attention_dropout_rate)
self.output_layer = nn.Linear(num_heads * att_size, hidden_size)
def forward(self, q, k, v, attn_bias=None):
orig_q_size = q.size()
d_k = self.att_size
d_v = self.att_size
batch_size = q.size(0)
q = self.linear_q(q).view(batch_size, -1, self.num_heads, d_k)
k = self.linear_k(k).view(batch_size, -1, self.num_heads, d_k)
v = self.linear_v(v).view(batch_size, -1, self.num_heads, d_v)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
k = k.transpose(1, 2).transpose(2, 3)
q = q * self.scale
x = torch.matmul(q, k)
if attn_bias is not None:
x = x + attn_bias
x = torch.softmax(x, dim=3)
x = self.att_dropout(x)
x = x.matmul(v)
x = x.transpose(1, 2).contiguous()
x = x.view(batch_size, -1, self.num_heads * d_v)
x = self.output_layer(x)
assert x.size() == orig_q_size
return x
class EncoderLayer(nn.Module):
def __init__(self, hidden_size, ffn_size, dropout_rate,
attention_dropout_rate, num_heads):
super(EncoderLayer, self).__init__()
self.self_attention_norm = nn.LayerNorm(hidden_size)
self.self_attention = MultiHeadAttention(hidden_size,
attention_dropout_rate, num_heads)
self.self_attention_dropout = nn.Dropout(dropout_rate)
self.ffn_norm = nn.LayerNorm(hidden_size)
self.ffn = FeedForwardNetwork(hidden_size, ffn_size, dropout_rate)
self.ffn_dropout = nn.Dropout(dropout_rate)
def forward(self, x, attn_bias=None):
y = self.self_attention_norm(x)
y = self.self_attention(y, y, y, attn_bias)
y = self.self_attention_dropout(y)
x = x + y
y = self.ffn_norm(x)
y = self.ffn(y)
y = self.ffn_dropout(y)
x = x + y
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4, 'ffn_size': 4, 'dropout_rate': 0.5,
'attention_dropout_rate': 0.5, 'num_heads': 4}]
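# Illustrative usage sketch (not part of the original module): this is a pre-norm
# block (LayerNorm is applied before self-attention and the FFN), and the two
# residual additions keep the output shape equal to the input shape.
_layer = EncoderLayer(hidden_size=4, ffn_size=4, dropout_rate=0.0,
    attention_dropout_rate=0.0, num_heads=4)
_x = torch.rand(4, 4, 4)                       # (batch, seq_len, hidden_size)
assert _layer(_x).shape == _x.shape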
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_mul_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_gelu_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_out_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4, 4), (4, 1))
assert_size_stride(primals_15, (4,), (1,))
assert_size_stride(primals_16, (4, 4), (4, 1))
assert_size_stride(primals_17, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0,
buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del primals_1
del primals_2
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_mul_2[grid(16, 4)](buf3, primals_5, buf6, 16,
4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf7 = reinterpret_tensor(buf3, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf3
triton_poi_fused_clone_3[grid(16, 4)](buf4, primals_7, buf7, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_7
buf8 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf7, (16, 1, 4), (4, 0, 1), 0), out=buf8)
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(256)](buf8, buf9, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf10 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf8
triton_poi_fused__softmax_5[grid(256)](buf9, buf10, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf9
buf11 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf4
triton_poi_fused_clone_3[grid(16, 4)](buf5, primals_9, buf11, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_9
buf12 = reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0)
del buf5
extern_kernels.bmm(reinterpret_tensor(buf10, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf11, (16, 4, 1), (4, 1, 0), 0), out=buf12)
buf13 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_6[grid(16, 4)](buf12, buf13, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf14 = reinterpret_tensor(buf12, (16, 4), (4, 1), 0)
del buf12
extern_kernels.addmm(primals_11, reinterpret_tensor(buf13, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf14)
del primals_11
buf15 = buf1
del buf1
buf16 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_7[grid(16)](primals_3, buf14,
buf15, buf16, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_8[grid(64)](primals_3, buf14,
buf15, buf16, primals_12, primals_13, buf17, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf15
del buf16
del primals_13
buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_15, reinterpret_tensor(buf17, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_14, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf18)
del primals_15
buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_gelu_9[grid(64)](buf18, buf19, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf20 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf19, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_16, (4, 4), (1, 4), 0), out=buf20)
buf21 = reinterpret_tensor(buf20, (4, 4, 4), (16, 4, 1), 0)
del buf20
triton_poi_fused_add_10[grid(64)](buf21, primals_3, buf14,
primals_17, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_17
return buf21, primals_3, primals_12, reinterpret_tensor(buf2, (16, 4),
(4, 1), 0), buf10, reinterpret_tensor(buf13, (16, 4), (4, 1), 0
), buf14, reinterpret_tensor(buf17, (16, 4), (4, 1), 0
), buf18, reinterpret_tensor(buf19, (16, 4), (4, 1), 0
), primals_16, primals_14, primals_10, reinterpret_tensor(buf11, (
16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf6, (16, 1, 4), (4,
1, 1), 0), reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 4), 0
), primals_8, primals_6, primals_4
class FeedForwardNetwork(nn.Module):
def __init__(self, hidden_size, ffn_size, dropout_rate):
super(FeedForwardNetwork, self).__init__()
self.layer1 = nn.Linear(hidden_size, ffn_size)
self.gelu = nn.GELU()
self.layer2 = nn.Linear(ffn_size, hidden_size)
def forward(self, x):
x = self.layer1(x)
x = self.gelu(x)
x = self.layer2(x)
return x
class MultiHeadAttention(nn.Module):
def __init__(self, hidden_size, attention_dropout_rate, num_heads):
super(MultiHeadAttention, self).__init__()
self.num_heads = num_heads
self.att_size = att_size = hidden_size // num_heads
self.scale = att_size ** -0.5
self.linear_q = nn.Linear(hidden_size, num_heads * att_size)
self.linear_k = nn.Linear(hidden_size, num_heads * att_size)
self.linear_v = nn.Linear(hidden_size, num_heads * att_size)
self.att_dropout = nn.Dropout(attention_dropout_rate)
self.output_layer = nn.Linear(num_heads * att_size, hidden_size)
def forward(self, q, k, v, attn_bias=None):
orig_q_size = q.size()
d_k = self.att_size
d_v = self.att_size
batch_size = q.size(0)
q = self.linear_q(q).view(batch_size, -1, self.num_heads, d_k)
k = self.linear_k(k).view(batch_size, -1, self.num_heads, d_k)
v = self.linear_v(v).view(batch_size, -1, self.num_heads, d_v)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
k = k.transpose(1, 2).transpose(2, 3)
q = q * self.scale
x = torch.matmul(q, k)
if attn_bias is not None:
x = x + attn_bias
x = torch.softmax(x, dim=3)
x = self.att_dropout(x)
x = x.matmul(v)
x = x.transpose(1, 2).contiguous()
x = x.view(batch_size, -1, self.num_heads * d_v)
x = self.output_layer(x)
assert x.size() == orig_q_size
return x
class EncoderLayerNew(nn.Module):
def __init__(self, hidden_size, ffn_size, dropout_rate,
attention_dropout_rate, num_heads):
super(EncoderLayerNew, self).__init__()
self.self_attention_norm = nn.LayerNorm(hidden_size)
self.self_attention = MultiHeadAttention(hidden_size,
attention_dropout_rate, num_heads)
self.self_attention_dropout = nn.Dropout(dropout_rate)
self.ffn_norm = nn.LayerNorm(hidden_size)
self.ffn = FeedForwardNetwork(hidden_size, ffn_size, dropout_rate)
self.ffn_dropout = nn.Dropout(dropout_rate)
def forward(self, input_0):
primals_1 = self.self_attention_norm.weight
primals_2 = self.self_attention_norm.bias
primals_4 = self.self_attention.linear_q.weight
primals_5 = self.self_attention.linear_q.bias
primals_6 = self.self_attention.linear_k.weight
primals_7 = self.self_attention.linear_k.bias
primals_8 = self.self_attention.linear_v.weight
primals_9 = self.self_attention.linear_v.bias
primals_10 = self.self_attention.output_layer.weight
primals_11 = self.self_attention.output_layer.bias
primals_12 = self.ffn_norm.weight
primals_13 = self.ffn_norm.bias
primals_14 = self.ffn.layer1.weight
primals_15 = self.ffn.layer1.bias
primals_16 = self.ffn.layer2.weight
primals_17 = self.ffn.layer2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17])
return output[0]
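# Usage sketch (illustrative, not part of the generated file): the dropout
# rates below are assumed values; the remaining hyper-parameters follow the
# shapes asserted in call() (hidden_size = ffn_size = 4, num_heads = 4, and
# an input of shape (4, 4, 4)).
if __name__ == '__main__':
    _layer = EncoderLayerNew(hidden_size=4, ffn_size=4, dropout_rate=0.5,
        attention_dropout_rate=0.5, num_heads=4).cuda()
    _out = _layer(torch.rand(4, 4, 4, device='cuda'))
    assert _out.shape == (4, 4, 4)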
|
ChantalMP/Graphormer
|
EncoderLayer
| false
| 8,968
|
[
"MIT"
] | 0
|
5c384d0f2840afc88ee88aeb874f4b1f41d760bf
|
https://github.com/ChantalMP/Graphormer/tree/5c384d0f2840afc88ee88aeb874f4b1f41d760bf
|
ConvWS2d
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def conv_ws_2d(input, weight, bias=None, stride=1, padding=0, dilation=1,
groups=1, eps=1e-05):
c_in = weight.size(0)
weight_flat = weight.view(c_in, -1)
mean = weight_flat.mean(dim=1, keepdim=True).view(c_in, 1, 1, 1)
std = weight_flat.std(dim=1, keepdim=True).view(c_in, 1, 1, 1)
weight = (weight - mean) / (std + eps)
return F.conv2d(input, weight, bias, stride, padding, dilation, groups)
class ConvWS2d(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True, eps=1e-05):
super(ConvWS2d, self).__init__(in_channels, out_channels,
kernel_size, stride=stride, padding=padding, dilation=dilation,
groups=groups, bias=bias)
self.eps = eps
def forward(self, x):
return conv_ws_2d(x, self.weight, self.bias, self.stride, self.
padding, self.dilation, self.groups, self.eps)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
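# Worked example (illustrative): weight standardization, as implemented in
# conv_ws_2d above, gives every output-channel slice of the weight a mean of
# (approximately) zero and a standard deviation close to one before the
# convolution is applied.
if __name__ == '__main__':
    _w = torch.randn(4, 4, 4, 4)
    _flat = _w.view(4, -1)
    _ws = (_w - _flat.mean(1).view(4, 1, 1, 1)) / (_flat.std(1).view(4, 1,
        1, 1) + 1e-05)
    assert torch.allclose(_ws.view(4, -1).mean(1), torch.zeros(4), atol=1e-06)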
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_div_mean_std_sub_0(in_out_ptr0, in_out_ptr1,
in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 64.0
tmp20 = tmp4 / tmp19
tmp21 = 63.0
tmp22 = tmp18 / tmp21
tmp23 = libdevice.sqrt(tmp22)
tmp24 = tmp0 - tmp20
tmp25 = 1e-05
tmp26 = tmp23 + tmp25
tmp27 = tmp24 / tmp26
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp20, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp23, xmask)
tl.store(out_ptr0 + (r1 + 64 * x0), tmp27, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1), (1, 1), 0)
del buf0
buf5 = reinterpret_tensor(buf3, (4, 1), (1, 1), 0)
del buf3
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_mean_std_sub_0[grid(4)](buf1, buf5,
primals_1, buf6, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf7 = extern_kernels.convolution(primals_3, buf6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 1, 1), (4, 1, 1, 1))
buf8 = buf7
del buf7
triton_poi_fused_convolution_1[grid(16)](buf8, primals_2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf8, primals_1, primals_3, buf1, buf5, buf6
def conv_ws_2d(input, weight, bias=None, stride=1, padding=0, dilation=1,
groups=1, eps=1e-05):
c_in = weight.size(0)
weight_flat = weight.view(c_in, -1)
mean = weight_flat.mean(dim=1, keepdim=True).view(c_in, 1, 1, 1)
std = weight_flat.std(dim=1, keepdim=True).view(c_in, 1, 1, 1)
weight = (weight - mean) / (std + eps)
return F.conv2d(input, weight, bias, stride, padding, dilation, groups)
class ConvWS2dNew(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True, eps=1e-05):
super(ConvWS2dNew, self).__init__(in_channels, out_channels,
kernel_size, stride=stride, padding=padding, dilation=dilation,
groups=groups, bias=bias)
self.eps = eps
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
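# Equivalence sketch (illustrative): with the same parameters, the
# Triton-backed ConvWS2dNew should match the eager conv_ws_2d path defined
# above up to floating-point tolerance on the 4x4x4x4 test input.
if __name__ == '__main__':
    _m = ConvWS2dNew(4, 4, 4).cuda()
    _x = torch.rand(4, 4, 4, 4, device='cuda')
    _ref = conv_ws_2d(_x, _m.weight, _m.bias)
    assert torch.allclose(_m(_x), _ref, atol=1e-05)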
|
Cynicsss/mmdetection
|
ConvWS2d
| false
| 8,969
|
[
"Apache-2.0"
] | 0
|
89e207fc8c8a7ae3663a5cda53d77b2b94cd1ec8
|
https://github.com/Cynicsss/mmdetection/tree/89e207fc8c8a7ae3663a5cda53d77b2b94cd1ec8
|
ConvRelu
|
import torch
from torch.nn.modules.loss import *
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import *
from torch.optim import *
from torch.optim.lr_scheduler import *
class ConvRelu(nn.Module):
"""3x3 convolution followed by ReLU activation building block.
"""
def __init__(self, num_in, num_out):
"""Creates a `ConvReLU` building block.
Args:
num_in: number of input feature maps
num_out: number of output feature maps
"""
super().__init__()
self.block = nn.Conv2d(num_in, num_out, kernel_size=3, padding=1,
bias=False)
def forward(self, x):
"""
The networks forward pass for which
autograd synthesizes the backwards pass.
Args:
x: the input tensor
Returns:
The networks output tensor.
"""
return F.relu(self.block(x), inplace=True)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_in': 4, 'num_out': 4}]
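# Usage sketch (illustrative): kernel_size=3 with padding=1 preserves the
# spatial size, and the ReLU keeps every activation non-negative.
if __name__ == '__main__':
    _block = ConvRelu(4, 4)
    _y = _block(torch.rand(4, 4, 4, 4))
    assert _y.shape == (4, 4, 4, 4) and bool((_y >= 0).all())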
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn.modules.loss import *
import torch.nn as nn
from torch.nn import *
from torch.optim import *
from torch.optim.lr_scheduler import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, buf2,
256, XBLOCK=128, num_warps=4, num_stages=1)
return buf1, primals_1, primals_2, buf2
class ConvReluNew(nn.Module):
"""3x3 convolution followed by ReLU activation building block.
"""
def __init__(self, num_in, num_out):
"""Creates a `ConvReLU` building block.
Args:
num_in: number of input feature maps
num_out: number of output feature maps
"""
super().__init__()
self.block = nn.Conv2d(num_in, num_out, kernel_size=3, padding=1,
bias=False)
def forward(self, input_0):
primals_1 = self.block.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
DBusAI/catalyst
|
ConvRelu
| false
| 8,970
|
[
"Apache-2.0"
] | 0
|
4fbdf477ea93b4d3781bf4eb10ae8da1747e4566
|
https://github.com/DBusAI/catalyst/tree/4fbdf477ea93b4d3781bf4eb10ae8da1747e4566
|
SEModule
|
import torch
import torch.nn as nn
import torch.utils.data
from collections import OrderedDict
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
def make_divisible(v, divisor, min_val=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
    :param v: channel count to round
    :param divisor: the result must be a multiple of this value
    :param min_val: lower bound for the result; defaults to ``divisor``
    :return: the rounded channel count
"""
if min_val is None:
min_val = divisor
new_v = max(min_val, int(v + divisor / 2) // divisor * divisor)
if new_v < 0.9 * v:
new_v += divisor
return new_v
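# Worked examples (illustrative): make_divisible rounds to the nearest
# multiple of `divisor` without dropping below `min_val`. The case that
# matters for SEModule below is channel // reduction = 1, which is floored
# up to 8 hidden channels.
if __name__ == '__main__':
    assert make_divisible(1, 8) == 8
    assert make_divisible(37, 8) == 40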
class MyModule(nn.Module):
def forward(self, x):
raise NotImplementedError
@property
def module_str(self):
raise NotImplementedError
@property
def config(self):
raise NotImplementedError
@staticmethod
def build_from_config(config):
raise NotImplementedError
class MyNetwork(MyModule):
CHANNEL_DIVISIBLE = 8
def forward(self, x):
raise NotImplementedError
@property
def module_str(self):
raise NotImplementedError
@property
def config(self):
raise NotImplementedError
@staticmethod
def build_from_config(config):
raise NotImplementedError
def zero_last_gamma(self):
raise NotImplementedError
@property
def grouped_block_index(self):
raise NotImplementedError
""" implemented methods """
def set_bn_param(self, momentum, eps, gn_channel_per_group=None, **kwargs):
set_bn_param(self, momentum, eps, gn_channel_per_group, **kwargs)
def get_bn_param(self):
return get_bn_param(self)
def get_parameters(self, keys=None, mode='include'):
if keys is None:
for name, param in self.named_parameters():
if param.requires_grad:
yield param
elif mode == 'include':
for name, param in self.named_parameters():
flag = False
for key in keys:
if key in name:
flag = True
break
if flag and param.requires_grad:
yield param
elif mode == 'exclude':
for name, param in self.named_parameters():
flag = True
for key in keys:
if key in name:
flag = False
break
if flag and param.requires_grad:
yield param
else:
raise ValueError('do not support: %s' % mode)
def weight_parameters(self):
return self.get_parameters()
class Hsigmoid(nn.Module):
def __init__(self, inplace=True):
super(Hsigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return F.relu6(x + 3.0, inplace=self.inplace) / 6.0
def __repr__(self):
return 'Hsigmoid()'
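# Sanity check (illustrative): the hard sigmoid relu6(x + 3) / 6 saturates at
# 0 for x <= -3, at 1 for x >= 3, and equals 0.5 at x = 0.
if __name__ == '__main__':
    _h = Hsigmoid(inplace=False)
    assert torch.equal(_h(torch.tensor([-3.0, 0.0, 3.0])), torch.tensor([
        0.0, 0.5, 1.0]))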
class SEModule(nn.Module):
REDUCTION = 4
def __init__(self, channel, reduction=None):
super(SEModule, self).__init__()
self.channel = channel
self.reduction = SEModule.REDUCTION if reduction is None else reduction
num_mid = make_divisible(self.channel // self.reduction, divisor=
MyNetwork.CHANNEL_DIVISIBLE)
self.fc = nn.Sequential(OrderedDict([('reduce', nn.Conv2d(self.
channel, num_mid, 1, 1, 0, bias=True)), ('relu', nn.ReLU(
inplace=True)), ('expand', nn.Conv2d(num_mid, self.channel, 1,
1, 0, bias=True)), ('h_sigmoid', Hsigmoid(inplace=True))]))
def forward(self, x):
y = x.mean(3, keepdim=True).mean(2, keepdim=True)
y = self.fc(y)
return x * y
def __repr__(self):
return 'SE(channel=%d, reduction=%d)' % (self.channel, self.reduction)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channel': 4}]
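# Usage sketch (illustrative): the SE block averages the input over its
# spatial dimensions, turns that into a per-channel gate in [0, 1], and
# rescales the input, so the output shape equals the input shape.
if __name__ == '__main__':
    _se = SEModule(channel=4)
    _x = torch.rand(4, 4, 4, 4)
    assert _se(_x).shape == _x.shape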
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
from collections import OrderedDict
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp10 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp30 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp32 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = tmp15 / tmp7
tmp17 = tmp8 + tmp16
tmp20 = tmp18 + tmp19
tmp22 = tmp20 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = tmp24 / tmp7
tmp26 = tmp17 + tmp25
tmp29 = tmp27 + tmp28
tmp31 = tmp29 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = tmp33 / tmp7
tmp35 = tmp26 + tmp34
tmp36 = tmp35 / tmp7
tl.store(out_ptr0 + x0, tmp36, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_convolution_div_hardtanh_mul_2(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex // 16
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 3.0
tmp5 = tmp3 + tmp4
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = 6.0
tmp9 = triton_helpers.minimum(tmp7, tmp8)
tmp10 = 0.16666666666666666
tmp11 = tmp9 * tmp10
tmp12 = tmp0 * tmp11
tl.store(out_ptr0 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_add_convolution_hardtanh_backward_3(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 3.0
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tmp7 = 6.0
tmp8 = tmp4 >= tmp7
tmp9 = tmp6 | tmp8
tl.store(out_ptr0 + x2, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (8, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (8,), (1,))
assert_size_stride(primals_4, (4, 8, 1, 1), (8, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 8, 1, 1), (8, 1, 1, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_relu_1[grid(32)](buf2, primals_3, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_3
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 1, 1), (4, 1, 1, 1))
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_convolution_div_hardtanh_mul_2[grid(256)](
primals_1, buf3, primals_5, buf4, 256, XBLOCK=256, num_warps=4,
num_stages=1)
buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
triton_poi_fused_add_convolution_hardtanh_backward_3[grid(16)](buf3,
primals_5, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf3
del primals_5
return buf4, primals_1, primals_2, primals_4, buf0, buf2, buf5
def make_divisible(v, divisor, min_val=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
    :param v: channel count to round
    :param divisor: the result must be a multiple of this value
    :param min_val: lower bound for the result; defaults to ``divisor``
    :return: the rounded channel count
"""
if min_val is None:
min_val = divisor
new_v = max(min_val, int(v + divisor / 2) // divisor * divisor)
if new_v < 0.9 * v:
new_v += divisor
return new_v
class MyModule(nn.Module):
def forward(self, x):
raise NotImplementedError
@property
def module_str(self):
raise NotImplementedError
@property
def config(self):
raise NotImplementedError
@staticmethod
def build_from_config(config):
raise NotImplementedError
class MyNetwork(MyModule):
CHANNEL_DIVISIBLE = 8
def forward(self, x):
raise NotImplementedError
@property
def module_str(self):
raise NotImplementedError
@property
def config(self):
raise NotImplementedError
@staticmethod
def build_from_config(config):
raise NotImplementedError
def zero_last_gamma(self):
raise NotImplementedError
@property
def grouped_block_index(self):
raise NotImplementedError
""" implemented methods """
def set_bn_param(self, momentum, eps, gn_channel_per_group=None, **kwargs):
set_bn_param(self, momentum, eps, gn_channel_per_group, **kwargs)
def get_bn_param(self):
return get_bn_param(self)
def get_parameters(self, keys=None, mode='include'):
if keys is None:
for name, param in self.named_parameters():
if param.requires_grad:
yield param
elif mode == 'include':
for name, param in self.named_parameters():
flag = False
for key in keys:
if key in name:
flag = True
break
if flag and param.requires_grad:
yield param
elif mode == 'exclude':
for name, param in self.named_parameters():
flag = True
for key in keys:
if key in name:
flag = False
break
if flag and param.requires_grad:
yield param
else:
raise ValueError('do not support: %s' % mode)
def weight_parameters(self):
return self.get_parameters()
class Hsigmoid(nn.Module):
def __init__(self, inplace=True):
super(Hsigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return F.relu6(x + 3.0, inplace=self.inplace) / 6.0
def __repr__(self):
return 'Hsigmoid()'
class SEModuleNew(nn.Module):
REDUCTION = 4
def __init__(self, channel, reduction=None):
super(SEModuleNew, self).__init__()
self.channel = channel
self.reduction = (SEModuleNew.REDUCTION if reduction is None else
reduction)
num_mid = make_divisible(self.channel // self.reduction, divisor=
MyNetwork.CHANNEL_DIVISIBLE)
self.fc = nn.Sequential(OrderedDict([('reduce', nn.Conv2d(self.
channel, num_mid, 1, 1, 0, bias=True)), ('relu', nn.ReLU(
inplace=True)), ('expand', nn.Conv2d(num_mid, self.channel, 1,
1, 0, bias=True)), ('h_sigmoid', Hsigmoid(inplace=True))]))
def __repr__(self):
return 'SE(channel=%d, reduction=%d)' % (self.channel, self.reduction)
def forward(self, input_0):
primals_2 = self.fc.reduce.weight
primals_3 = self.fc.reduce.bias
primals_4 = self.fc.expand.weight
primals_5 = self.fc.expand.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
AlbertiPot/once-for-all
|
SEModule
| false
| 8,971
|
[
"MIT"
] | 0
|
092b9e6184be353383396761ea5ec61d67152645
|
https://github.com/AlbertiPot/once-for-all/tree/092b9e6184be353383396761ea5ec61d67152645
|
ConvLayer
|
import torch
class ConvLayer(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride):
super(ConvLayer, self).__init__()
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels,
kernel_size, stride)
def forward(self, x):
out = self.reflection_pad(x)
out = self.conv2d(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4,
'stride': 1}]
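# Shape check (illustrative): ReflectionPad2d adds kernel_size // 2 = 2
# pixels per side, so the 4x4 input grows to 8x8 and the stride-1
# convolution returns (8 - 4) + 1 = 5, i.e. an output of shape (4, 4, 5, 5).
if __name__ == '__main__':
    _layer = ConvLayer(4, 4, kernel_size=4, stride=1)
    assert _layer(torch.rand(4, 4, 4, 4)).shape == (4, 4, 5, 5)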
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8 % 8
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-2 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-2 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 25 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(1024)](primals_1, buf0,
1024, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 5, 5), (100, 25, 5, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(400)](buf2, primals_3, 400,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
return buf2, primals_2, buf0
class ConvLayerNew(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride):
super(ConvLayerNew, self).__init__()
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels,
kernel_size, stride)
def forward(self, input_0):
primals_1 = self.conv2d.weight
primals_3 = self.conv2d.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Chandan-h-509/ignite
|
ConvLayer
| false
| 8,972
|
[
"BSD-3-Clause"
] | 0
|
f8c39828cb1dac49b6ef358cdf77865bf2430106
|
https://github.com/Chandan-h-509/ignite/tree/f8c39828cb1dac49b6ef358cdf77865bf2430106
|
LongCNN
|
import torch
from torch import nn
class LongCNN(nn.Module):
def __init__(self, num_channels, input_shape, name, conv_sizes=[64, 128,
128, 256], lin_size=512):
super(LongCNN, self).__init__()
self.name = name
self.relu = nn.ReLU(inplace=True)
self.do1 = nn.Dropout(p=0.25)
self.do2 = nn.Dropout(p=0.25)
self.conv1 = nn.Conv2d(num_channels, out_channels=conv_sizes[0],
kernel_size=6, stride=3)
self.conv2 = nn.Conv2d(in_channels=conv_sizes[0], out_channels=
conv_sizes[1], kernel_size=4, stride=2)
self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv3 = nn.Conv2d(in_channels=conv_sizes[1], out_channels=
conv_sizes[2], kernel_size=3)
self.conv4 = nn.Conv2d(in_channels=conv_sizes[2], out_channels=
conv_sizes[3], kernel_size=3)
self.pool2 = nn.AdaptiveAvgPool2d((1, 1))
self.fc1 = nn.Linear(in_features=conv_sizes[-1], out_features=lin_size)
self.fc2 = nn.Linear(in_features=lin_size, out_features=1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out',
nonlinearity='relu')
def forward(self, x):
x = self.conv1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.pool1(x)
x = self.relu(x)
x = self.conv3(x)
x = self.pool1(x)
x = self.relu(x)
x = self.conv4(x)
x = self.pool1(x)
x = self.relu(x)
x = self.pool2(x)
x = torch.flatten(x, start_dim=1)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
return x
@property
def save_path(self):
return self.name + '.chkpt'
def get_inputs():
return [torch.rand([4, 4, 256, 256])]
def get_init_inputs():
return [[], {'num_channels': 4, 'input_shape': 4, 'name': 4}]
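# Shape trace (illustrative), matching the sizes asserted in the Triton
# call() below for a 4x4x256x256 input: conv1 (k6, s3) 256 -> 84, conv2
# (k4, s2) 84 -> 41, pool1 41 -> 20, conv3 (k3) 20 -> 18, pool1 18 -> 9,
# conv4 (k3) 9 -> 7, pool1 7 -> 3, adaptive avg pool -> 1x1, flatten to 256,
# fc1 -> 512, fc2 -> 1. The input_shape and name arguments are placeholders.
if __name__ == '__main__':
    _net = LongCNN(num_channels=4, input_shape=(256, 256), name='demo')
    assert _net(torch.rand(4, 4, 256, 256)).shape == (4, 1)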
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 256
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 36 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 4 * x2 + 144 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 65536 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 4 * x2 + 262144 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 1024 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 860672
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_7(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = xindex // 128 % 20
x2 = xindex // 2560 % 20
x3 = xindex // 51200
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 10496 * x2 + 215168 * x3), None)
tmp1 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 10496 * x2 + 215168 *
x3), None)
tmp7 = tl.load(in_ptr0 + (5248 + x0 + 256 * x1 + 10496 * x2 + 215168 *
x3), None)
tmp12 = tl.load(in_ptr0 + (5376 + x0 + 256 * x1 + 10496 * x2 + 215168 *
x3), None)
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tl.store(out_ptr0 + x4, tmp15, None)
tl.store(out_ptr1 + x4, tmp18, None)
@triton.jit
def triton_poi_fused_convolution_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_9(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 41472
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 128
x1 = xindex // 128 % 9
x2 = xindex // 1152
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 4608 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 4608 * x2), xmask)
tmp7 = tl.load(in_ptr0 + (2304 + x0 + 256 * x1 + 4608 * x2), xmask)
tmp12 = tl.load(in_ptr0 + (2432 + x0 + 256 * x1 + 4608 * x2), xmask)
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tl.store(out_ptr0 + x3, tmp15, xmask)
tl.store(out_ptr1 + x3, tmp18, xmask)
@triton.jit
def triton_poi_fused_convolution_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_11(in_ptr0
, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 9216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 256
x1 = xindex // 256 % 3
x2 = xindex // 768 % 3
x3 = xindex // 2304
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 3584 * x2 + 12544 * x3), xmask)
tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 3584 * x2 + 12544 * x3),
xmask)
tmp7 = tl.load(in_ptr0 + (1792 + x0 + 512 * x1 + 3584 * x2 + 12544 * x3
), xmask)
tmp12 = tl.load(in_ptr0 + (2048 + x0 + 512 * x1 + 3584 * x2 + 12544 *
x3), xmask)
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = 0.0
tmp20 = tmp18 <= tmp19
tl.store(out_ptr0 + x4, tmp15, xmask)
tl.store(out_ptr1 + x4, tmp20, xmask)
@triton.jit
def triton_per_fused_max_pool2d_with_indices_mean_relu_12(in_out_ptr0,
in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 1024
rnumel = 9
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r2 = rindex % 3
r3 = rindex // 3
x0 = xindex % 256
x1 = xindex // 256
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 512 * r2 + 3584 * r3 + 12544 * x1),
rmask & xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * r2 + 3584 * r3 + 12544 * x1),
rmask & xmask, other=0.0)
tmp3 = tl.load(in_ptr0 + (1792 + x0 + 512 * r2 + 3584 * r3 + 12544 * x1
), rmask & xmask, other=0.0)
tmp5 = tl.load(in_ptr0 + (2048 + x0 + 512 * r2 + 3584 * r3 + 12544 * x1
), rmask & xmask, other=0.0)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tl.full([1, 1], 0, tl.int32)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(rmask & xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tmp13 = 9.0
tmp14 = tmp12 / tmp13
tl.debug_barrier()
tl.store(in_out_ptr0 + x4, tmp14, xmask)
@triton.jit
def triton_poi_fused_relu_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (64, 4, 6, 6), (144, 36, 6, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 4, 256, 256), (262144, 65536, 256, 1))
assert_size_stride(primals_4, (128, 64, 4, 4), (1024, 16, 4, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (512, 256), (256, 1))
assert_size_stride(primals_11, (512,), (1,))
assert_size_stride(primals_12, (1, 512), (512, 1))
assert_size_stride(primals_13, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4, 6, 6), (144, 1, 24, 4), torch.float32
)
get_raw_stream(0)
triton_poi_fused_0[grid(256, 36)](primals_1, buf0, 256, 36, XBLOCK=
32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4, 256, 256), (262144, 1, 1024, 4),
torch.float32)
triton_poi_fused_1[grid(16, 65536)](primals_3, buf1, 16, 65536,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64),
torch.float32)
triton_poi_fused_2[grid(8192, 16)](primals_4, buf2, 8192, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_3[grid(16384, 9)](primals_6, buf3, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_4[grid(32768, 9)](primals_8, buf4, 32768, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf5 = extern_kernels.convolution(buf1, buf0, stride=(3, 3),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 64, 84, 84), (451584, 1, 5376, 64))
buf6 = buf5
del buf5
triton_poi_fused_convolution_relu_5[grid(1806336)](buf6, primals_2,
1806336, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf7 = extern_kernels.convolution(buf6, buf2, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 128, 41, 41), (215168, 1, 5248, 128))
buf8 = buf7
del buf7
triton_poi_fused_convolution_6[grid(860672)](buf8, primals_5,
860672, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf9 = empty_strided_cuda((4, 128, 20, 20), (51200, 1, 2560, 128),
torch.int8)
buf10 = empty_strided_cuda((4, 128, 20, 20), (51200, 1, 2560, 128),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_relu_7[grid(204800)](buf8,
buf9, buf10, 204800, XBLOCK=1024, num_warps=4, num_stages=1)
buf11 = extern_kernels.convolution(buf10, buf3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 128, 18, 18), (41472, 1, 2304, 128))
buf12 = buf11
del buf11
triton_poi_fused_convolution_8[grid(165888)](buf12, primals_7,
165888, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf13 = empty_strided_cuda((4, 128, 9, 9), (10368, 1, 1152, 128),
torch.int8)
buf14 = empty_strided_cuda((4, 128, 9, 9), (10368, 1, 1152, 128),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_relu_9[grid(41472)](buf12,
buf13, buf14, 41472, XBLOCK=512, num_warps=4, num_stages=1)
buf15 = extern_kernels.convolution(buf14, buf4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 256, 7, 7), (12544, 1, 1792, 256))
buf16 = buf15
del buf15
triton_poi_fused_convolution_10[grid(50176)](buf16, primals_9,
50176, XBLOCK=512, num_warps=4, num_stages=1)
del primals_9
buf17 = empty_strided_cuda((4, 256, 3, 3), (2304, 1, 768, 256),
torch.int8)
buf24 = empty_strided_cuda((4, 256, 3, 3), (2304, 1, 768, 256),
torch.bool)
triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_11[
grid(9216)](buf16, buf17, buf24, 9216, XBLOCK=128, num_warps=4,
num_stages=1)
buf18 = empty_strided_cuda((4, 256, 1, 1), (256, 1, 1024, 1024),
torch.float32)
buf19 = buf18
del buf18
triton_per_fused_max_pool2d_with_indices_mean_relu_12[grid(1024)](buf19
, buf16, 1024, 9, XBLOCK=32, num_warps=4, num_stages=1)
buf20 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf19, (4, 256), (256, 1), 0),
reinterpret_tensor(primals_10, (256, 512), (1, 256), 0), out=buf20)
buf21 = buf20
del buf20
triton_poi_fused_relu_13[grid(2048)](buf21, primals_11, 2048,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_11
buf23 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_13, buf21, reinterpret_tensor(
primals_12, (512, 1), (1, 512), 0), alpha=1, beta=1, out=buf23)
del primals_13
return (buf23, buf0, buf1, buf2, buf3, buf4, buf6, buf8, buf9, buf10,
buf12, buf13, buf14, buf16, buf17, reinterpret_tensor(buf19, (4,
256), (256, 1), 0), buf21, primals_12, primals_10, buf24)
class LongCNNNew(nn.Module):
def __init__(self, num_channels, input_shape, name, conv_sizes=[64, 128,
128, 256], lin_size=512):
super(LongCNNNew, self).__init__()
self.name = name
self.relu = nn.ReLU(inplace=True)
self.do1 = nn.Dropout(p=0.25)
self.do2 = nn.Dropout(p=0.25)
self.conv1 = nn.Conv2d(num_channels, out_channels=conv_sizes[0],
kernel_size=6, stride=3)
self.conv2 = nn.Conv2d(in_channels=conv_sizes[0], out_channels=
conv_sizes[1], kernel_size=4, stride=2)
self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv3 = nn.Conv2d(in_channels=conv_sizes[1], out_channels=
conv_sizes[2], kernel_size=3)
self.conv4 = nn.Conv2d(in_channels=conv_sizes[2], out_channels=
conv_sizes[3], kernel_size=3)
self.pool2 = nn.AdaptiveAvgPool2d((1, 1))
self.fc1 = nn.Linear(in_features=conv_sizes[-1], out_features=lin_size)
self.fc2 = nn.Linear(in_features=lin_size, out_features=1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out',
nonlinearity='relu')
@property
def save_path(self):
return self.name + '.chkpt'
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.fc1.weight
primals_11 = self.fc1.bias
primals_12 = self.fc2.weight
primals_13 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
|
Csaba591/LHYP
|
LongCNN
| false
| 8,973
|
[
"MIT"
] | 0
|
d1b07381b9dc39210d338b60908acfa64c476b8e
|
https://github.com/Csaba591/LHYP/tree/d1b07381b9dc39210d338b60908acfa64c476b8e
|
FC_Q
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class FC_Q(nn.Module):
def __init__(self, state_dim, num_actions):
super(FC_Q, self).__init__()
self.q1 = nn.Linear(state_dim, 256)
self.q2 = nn.Linear(256, 256)
self.q3 = nn.Linear(256, num_actions)
self.i1 = nn.Linear(state_dim, 256)
self.i2 = nn.Linear(256, 256)
self.i3 = nn.Linear(256, num_actions)
def forward(self, state):
q = F.relu(self.q1(state))
q = F.relu(self.q2(q))
i = F.relu(self.i1(state))
i = F.relu(self.i2(i))
i = F.relu(self.i3(i))
return self.q3(q), F.log_softmax(i, dim=1), i
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'num_actions': 4}]
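# Usage sketch (illustrative): FC_Q returns the Q-values from the q-head, the
# log-softmax (over dim=1) of the second head, and that head's raw ReLU
# activations; with state_dim = num_actions = 4 all three keep the input shape.
if __name__ == '__main__':
    _net = FC_Q(state_dim=4, num_actions=4)
    _qv, _log_pi, _i = _net(torch.rand(4, 4, 4, 4))
    assert _qv.shape == _log_pi.shape == _i.shape == (4, 4, 4, 4)
    assert torch.allclose(_log_pi.exp().sum(dim=1), torch.ones(4, 4, 4))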
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__log_softmax_threshold_backward_2(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = 0.0
tmp10 = tmp0 <= tmp9
tl.store(out_ptr0 + x3, tmp8, xmask)
tl.store(out_ptr1 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused__log_softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (256, 256), (256, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (256, 4), (4, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (256, 256), (256, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (4, 256), (256, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 256), (256, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf0
buf17 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1,
primals_2, buf17, 16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_4, (256, 256), (1, 256), 0), out=buf2)
buf3 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 256), (1, 4), 0), out=buf3)
del primals_6
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf3
buf15 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf4,
primals_7, buf15, 16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf5 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf4, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_8, (256, 256), (1, 256), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf5
buf14 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf6,
primals_9, buf14, 16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf6, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_10, (256, 4), (1, 256), 0), out=buf7)
buf8 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf7
triton_poi_fused_relu_1[grid(256)](buf8, primals_11, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_11
buf9 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf2
buf16 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf9,
primals_5, buf16, 16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf10 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_13, reinterpret_tensor(buf9, (64, 256),
(256, 1), 0), reinterpret_tensor(primals_12, (256, 4), (1, 256),
0), alpha=1, beta=1, out=buf10)
del primals_13
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused__log_softmax_threshold_backward_2[grid(256)](buf8,
buf11, buf13, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_3[grid(256)](buf11, buf12, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del buf11
return (reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0),
buf12, buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(buf1, (64, 256), (256, 1), 0),
reinterpret_tensor(buf4, (64, 256), (256, 1), 0),
reinterpret_tensor(buf6, (64, 256), (256, 1), 0),
reinterpret_tensor(buf9, (64, 256), (256, 1), 0), buf12, primals_12,
buf13, primals_10, buf14, primals_8, buf15, buf16, primals_4, buf17)
class FC_QNew(nn.Module):
def __init__(self, state_dim, num_actions):
super(FC_QNew, self).__init__()
self.q1 = nn.Linear(state_dim, 256)
self.q2 = nn.Linear(256, 256)
self.q3 = nn.Linear(256, num_actions)
self.i1 = nn.Linear(state_dim, 256)
self.i2 = nn.Linear(256, 256)
self.i3 = nn.Linear(256, num_actions)
def forward(self, input_0):
primals_1 = self.q1.weight
primals_2 = self.q1.bias
primals_4 = self.q2.weight
primals_5 = self.q2.bias
primals_10 = self.q3.weight
primals_11 = self.q3.bias
primals_6 = self.i1.weight
primals_7 = self.i1.bias
primals_8 = self.i2.weight
primals_9 = self.i2.bias
primals_12 = self.i3.weight
primals_13 = self.i3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0], output[1], output[2]
|
Altriaex/d4rl_evaluations
|
FC_Q
| false
| 8,974
|
[
"Apache-2.0"
] | 0
|
ceb34c04e98af9332c6338a1414c0c2aa5fea68b
|
https://github.com/Altriaex/d4rl_evaluations/tree/ceb34c04e98af9332c6338a1414c0c2aa5fea68b
|
InputInjection
|
import torch
import torch.nn as nn
import torch._C
import torch.serialization
class InputInjection(nn.Module):
"""Downsampling module for CGNet."""
def __init__(self, num_downsampling):
super(InputInjection, self).__init__()
self.pool = nn.ModuleList()
for i in range(num_downsampling):
self.pool.append(nn.AvgPool2d(3, stride=2, padding=1))
def forward(self, x):
for pool in self.pool:
x = pool(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_downsampling': 4}]
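# Editor's note: a minimal CPU smoke test (illustrative, not part of the original repo)
# using the get_inputs()/get_init_inputs() conventions above. Four stride-2 average
# pooling stages with padding 1 shrink the 4x4 spatial map to 1x1.
if __name__ == '__main__':
    _m = InputInjection(num_downsampling=4)
    _y = _m(torch.rand(4, 4, 4, 4))
    assert _y.shape == (4, 4, 1, 1)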
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch._C
import torch.serialization
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 2 % 2
x0 = xindex % 2
x3 = xindex // 2
x4 = xindex
tmp0 = -1 + 2 * x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + 2 * x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x3), tmp10 & xmask,
eviction_policy='evict_last', other=0.0)
tmp12 = 2 * x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x3), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp18 = tmp17 + tmp11
tmp19 = 1 + 2 * x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-3 + 2 * x0 + 8 * x3), tmp23 & xmask,
eviction_policy='evict_last', other=0.0)
tmp25 = tmp24 + tmp18
tmp26 = 2 * x1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x3), tmp30 & xmask,
eviction_policy='evict_last', other=0.0)
tmp32 = tmp31 + tmp25
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (2 * x0 + 8 * x3), tmp33 & xmask,
eviction_policy='evict_last', other=0.0)
tmp35 = tmp34 + tmp32
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x3), tmp36 & xmask,
eviction_policy='evict_last', other=0.0)
tmp38 = tmp37 + tmp35
tmp39 = 1 + 2 * x1
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3 + 2 * x0 + 8 * x3), tmp43 & xmask,
eviction_policy='evict_last', other=0.0)
tmp45 = tmp44 + tmp38
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x3), tmp46 & xmask,
eviction_policy='evict_last', other=0.0)
tmp48 = tmp47 + tmp45
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x3), tmp49 & xmask,
eviction_policy='evict_last', other=0.0)
tmp51 = tmp50 + tmp48
tmp52 = 1 + -2 * x0 + -2 * x1 + (5 * (5 <= 2 + 2 * x0) + (2 + 2 * x0) *
(2 + 2 * x0 < 5)) * (5 * (5 <= 2 + 2 * x1) + (2 + 2 * x1) * (2 + 2 *
x1 < 5)) + -2 * x0 * (5 * (5 <= 2 + 2 * x1) + (2 + 2 * x1) * (2 + 2 *
x1 < 5)) + -2 * x1 * (5 * (5 <= 2 + 2 * x0) + (2 + 2 * x0) * (2 + 2 *
x0 < 5)) + 4 * x0 * x1 + (5 * (5 <= 2 + 2 * x0) + (2 + 2 * x0) * (2 +
2 * x0 < 5)) + (5 * (5 <= 2 + 2 * x1) + (2 + 2 * x1) * (2 + 2 * x1 < 5)
)
tmp53 = tmp51 / tmp52
tl.store(out_ptr0 + x4, tmp53, xmask)
@triton.jit
def triton_poi_fused_avg_pool2d_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.full([1], -1, tl.int64)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tmp5 & tmp5
tmp7 = tl.load(in_ptr0 + (-3 + 4 * x0), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp8 = tmp1 >= tmp1
tmp9 = tmp1 < tmp3
tmp10 = tmp8 & tmp9
tmp11 = tmp5 & tmp10
tmp12 = tl.load(in_ptr0 + (-2 + 4 * x0), tmp11 & xmask, eviction_policy
='evict_last', other=0.0)
tmp13 = tmp12 + tmp7
tmp14 = tl.full([1], 1, tl.int64)
tmp15 = tmp14 >= tmp1
tmp16 = tmp14 < tmp3
tmp17 = tmp15 & tmp16
tmp18 = tmp5 & tmp17
tmp19 = tl.load(in_ptr0 + (-1 + 4 * x0), tmp18 & xmask, eviction_policy
='evict_last', other=0.0)
tmp20 = tmp19 + tmp13
tmp21 = tmp10 & tmp5
tmp22 = tl.load(in_ptr0 + (-1 + 4 * x0), tmp21 & xmask, eviction_policy
='evict_last', other=0.0)
tmp23 = tmp22 + tmp20
tmp24 = tmp10 & tmp10
tmp25 = tl.load(in_ptr0 + 4 * x0, tmp24 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp26 = tmp25 + tmp23
tmp27 = tmp10 & tmp17
tmp28 = tl.load(in_ptr0 + (1 + 4 * x0), tmp27 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp29 = tmp28 + tmp26
tmp30 = tmp17 & tmp5
tmp31 = tl.load(in_ptr0 + (1 + 4 * x0), tmp30 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp32 = tmp31 + tmp29
tmp33 = tmp17 & tmp10
tmp34 = tl.load(in_ptr0 + (2 + 4 * x0), tmp33 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp35 = tmp34 + tmp32
tmp36 = tmp17 & tmp17
tmp37 = tl.load(in_ptr0 + (3 + 4 * x0), tmp36 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp38 = tmp37 + tmp35
tmp39 = tl.full([1], 9, tl.int32)
tmp40 = tmp38 / tmp39
tmp41 = tmp0 < tmp14
tmp42 = tmp2 & tmp41
tmp42 & tmp42
tmp44 = tmp1 < tmp14
tmp45 = tmp8 & tmp44
tmp42 & tmp45
tmp47 = tmp40 + tmp40
tmp48 = tmp14 < tmp14
tmp49 = tmp15 & tmp48
tmp42 & tmp49
tmp51 = tmp40 + tmp47
tmp45 & tmp42
tmp53 = tmp40 + tmp51
tmp45 & tmp45
tmp55 = tmp40 + tmp53
tmp45 & tmp49
tmp57 = tmp40 + tmp55
tmp49 & tmp42
tmp59 = tmp40 + tmp57
tmp49 & tmp45
tmp61 = tmp40 + tmp59
tmp49 & tmp49
tmp63 = tmp40 + tmp61
tmp64 = tmp63 / tmp39
tmp65 = tmp64 + tmp64
tmp66 = tmp64 + tmp65
tmp67 = tmp64 + tmp66
tmp68 = tmp64 + tmp67
tmp69 = tmp64 + tmp68
tmp70 = tmp64 + tmp69
tmp71 = tmp64 + tmp70
tmp72 = tmp64 + tmp71
tmp73 = tmp72 / tmp39
tl.store(in_out_ptr0 + x0, tmp73, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf2 = buf1
del buf1
buf3 = reinterpret_tensor(buf2, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf2
triton_poi_fused_avg_pool2d_1[grid(16)](buf3, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf0
return buf3,
class InputInjectionNew(nn.Module):
"""Downsampling module for CGNet."""
def __init__(self, num_downsampling):
super(InputInjectionNew, self).__init__()
self.pool = nn.ModuleList()
for i in range(num_downsampling):
self.pool.append(nn.AvgPool2d(3, stride=2, padding=1))
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
AnonSubmission6150/submission6150
|
InputInjection
| false
| 8,975
|
[
"Apache-2.0"
] | 0
|
571633d9a12b4fd7a9546947787fc068966dab04
|
https://github.com/AnonSubmission6150/submission6150/tree/571633d9a12b4fd7a9546947787fc068966dab04
|
Policy
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Policy(nn.Module):
def __init__(self):
super(Policy, self).__init__()
self.affine1 = nn.Linear(4, 128)
self.affine2 = nn.Linear(128, 2)
self.saved_log_probs = []
self.rewards = []
def forward(self, x):
x = F.relu(self.affine1(x))
action_scores = self.affine2(x)
return F.softmax(action_scores, dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
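# Editor's note: an illustrative smoke test (not from the original repo). With the
# 4x4x4x4 tensor returned by get_inputs(), the two linear layers keep the leading
# dimensions and map the last one 4 -> 128 -> 2, so the softmax output is 4x4x4x2.
if __name__ == '__main__':
    _policy = Policy()
    _probs = _policy(torch.rand(4, 4, 4, 4))
    assert _probs.shape == (4, 4, 4, 2)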
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 8
x2 = xindex // 32
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 32 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (8 + x0 + 32 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (16 + x0 + 32 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (24 + x0 + 32 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 8
x2 = xindex // 32
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 32 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (8 + x0 + 32 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (16 + x0 + 32 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (24 + x0 + 32 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (128, 4), (4, 1))
assert_size_stride(primals_2, (128,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (2, 128), (128, 1))
assert_size_stride(primals_5, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf0
buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1,
primals_2, buf5, 8192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_4, (128, 2), (1, 128),
0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
triton_poi_fused__softmax_1[grid(128)](buf2, buf3, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 2), (32, 8, 2, 1), 0)
del buf2
triton_poi_fused__softmax_2[grid(128)](buf3, buf4, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del buf3
return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 128), (128, 1), 0
), buf4, primals_4, buf5
class PolicyNew(nn.Module):
def __init__(self):
super(PolicyNew, self).__init__()
self.affine1 = nn.Linear(4, 128)
self.affine2 = nn.Linear(128, 2)
self.saved_log_probs = []
self.rewards = []
def forward(self, input_0):
primals_1 = self.affine1.weight
primals_2 = self.affine1.bias
primals_4 = self.affine2.weight
primals_5 = self.affine2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
Chandan-h-509/ignite
|
Policy
| false
| 8,976
|
[
"BSD-3-Clause"
] | 0
|
f8c39828cb1dac49b6ef358cdf77865bf2430106
|
https://github.com/Chandan-h-509/ignite/tree/f8c39828cb1dac49b6ef358cdf77865bf2430106
|
DecoderBlock
|
import torch
from torch.nn.modules.loss import *
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import *
from torch.optim import *
from torch.optim.lr_scheduler import *
class ConvRelu(nn.Module):
"""3x3 convolution followed by ReLU activation building block.
"""
def __init__(self, num_in, num_out):
"""Creates a `ConvReLU` building block.
Args:
num_in: number of input feature maps
num_out: number of output feature maps
"""
super().__init__()
self.block = nn.Conv2d(num_in, num_out, kernel_size=3, padding=1,
bias=False)
def forward(self, x):
"""
        The network's forward pass, for which
        autograd synthesizes the backward pass.
Args:
x: the input tensor
Returns:
            The network's output tensor.
"""
return F.relu(self.block(x), inplace=True)
class DecoderBlock(nn.Module):
"""Decoder building block upsampling resolution by a factor of two.
"""
def __init__(self, num_in, num_out):
"""Creates a `DecoderBlock` building block.
Args:
num_in: number of input feature maps
num_out: number of output feature maps
"""
super().__init__()
self.block = ConvRelu(num_in, num_out)
def forward(self, x):
"""
        The network's forward pass, for which
        autograd synthesizes the backward pass.
Args:
x: the input tensor
Returns:
            The network's output tensor.
"""
return self.block(F.interpolate(x, scale_factor=2, mode='nearest'))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_in': 4, 'num_out': 4}]
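# Editor's note: an illustrative smoke test (not from the original repo). Nearest
# upsampling doubles the spatial size and the padded 3x3 convolution preserves it,
# so the 4x4x4x4 input from get_inputs() comes out as 4x4x8x8.
if __name__ == '__main__':
    _block = DecoderBlock(num_in=4, num_out=4)
    _out = _block(torch.rand(4, 4, 4, 4))
    assert _out.shape == (4, 4, 8, 8)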
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn.modules.loss import *
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import *
from torch.optim import *
from torch.optim.lr_scheduler import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 8 % 8
x0 = xindex % 8
x2 = xindex // 64
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tmp5 = x0
tmp6 = tmp5.to(tl.float32)
tmp7 = tmp6 * tmp2
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), xmask,
eviction_policy='evict_last')
tl.store(out_ptr0 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__unsafe_index_0[grid(1024)](primals_1, buf0, 1024,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 8, 8), (256, 64, 8, 1))
buf2 = buf1
del buf1
buf3 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(1024)](buf2, buf3,
1024, XBLOCK=128, num_warps=4, num_stages=1)
return buf2, primals_2, buf0, buf3
class ConvRelu(nn.Module):
"""3x3 convolution followed by ReLU activation building block.
"""
def __init__(self, num_in, num_out):
"""Creates a `ConvReLU` building block.
Args:
num_in: number of input feature maps
num_out: number of output feature maps
"""
super().__init__()
self.block = nn.Conv2d(num_in, num_out, kernel_size=3, padding=1,
bias=False)
def forward(self, x):
"""
        The network's forward pass, for which
        autograd synthesizes the backward pass.
Args:
x: the input tensor
Returns:
            The network's output tensor.
"""
return F.relu(self.block(x), inplace=True)
class DecoderBlockNew(nn.Module):
"""Decoder building block upsampling resolution by a factor of two.
"""
def __init__(self, num_in, num_out):
"""Creates a `DecoderBlock` building block.
Args:
num_in: number of input feature maps
num_out: number of output feature maps
"""
super().__init__()
self.block = ConvRelu(num_in, num_out)
def forward(self, input_0):
primals_2 = self.block.block.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
|
DBusAI/catalyst
|
DecoderBlock
| false
| 8,977
|
[
"Apache-2.0"
] | 0
|
4fbdf477ea93b4d3781bf4eb10ae8da1747e4566
|
https://github.com/DBusAI/catalyst/tree/4fbdf477ea93b4d3781bf4eb10ae8da1747e4566
|
NormedLinear
|
import torch
import torch.nn.functional as F
from torch import nn
class NormedLinear(nn.Linear):
"""Normalized Linear Layer.
Args:
        tempearture (float, optional): Temperature term. Defaults to 20.
        power (int, optional): Power term. Defaults to 1.0.
        eps (float, optional): The minimal value of the divisor, used to
            keep numerical stability. Defaults to 1e-6.
"""
def __init__(self, *args, tempearture=20, power=1.0, eps=1e-06, **kwargs):
super(NormedLinear, self).__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.eps = eps
self.init_weights()
def init_weights(self):
nn.init.normal_(self.weight, mean=0, std=0.01)
if self.bias is not None:
nn.init.constant_(self.bias, 0)
def forward(self, x):
weight_ = self.weight / (self.weight.norm(dim=1, keepdim=True).pow(
self.power) + self.eps)
x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x_ * self.tempearture
return F.linear(x_, weight_, self.bias)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
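# Editor's note: an illustrative smoke test (not from the original repo). Both the
# weight and the input are L2-normalized along dim 1 before the linear projection,
# so the output keeps the input's leading dimensions with the last one mapped 4 -> 4.
if __name__ == '__main__':
    _layer = NormedLinear(in_features=4, out_features=4)
    _out = _layer(torch.rand(4, 4, 4, 4))
    assert _out.shape == (4, 4, 4, 4)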
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_mul_pow_0(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-06
tmp14 = tmp12 + tmp13
tmp15 = tmp0 / tmp14
tmp16 = 20.0
tmp17 = tmp15 * tmp16
tl.store(out_ptr0 + x3, tmp17, xmask)
@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_pow_1(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-06
tmp14 = tmp12 + tmp13
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_linalg_vector_norm_mul_pow_0[grid(256)](
primals_2, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_div_linalg_vector_norm_pow_1[grid(16)](primals_1,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (64, 4), (
4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), alpha=1,
beta=1, out=buf2)
del buf1
del primals_3
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_1, reinterpret_tensor(buf0, (64, 4), (4, 1), 0)
class NormedLinearNew(nn.Linear):
"""Normalized Linear Layer.
Args:
        tempearture (float, optional): Temperature term. Defaults to 20.
        power (int, optional): Power term. Defaults to 1.0.
        eps (float, optional): The minimal value of the divisor, used to
            keep numerical stability. Defaults to 1e-6.
"""
def __init__(self, *args, tempearture=20, power=1.0, eps=1e-06, **kwargs):
super(NormedLinearNew, self).__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.eps = eps
self.init_weights()
def init_weights(self):
nn.init.normal_(self.weight, mean=0, std=0.01)
if self.bias is not None:
nn.init.constant_(self.bias, 0)
def forward(self, input_0):
primals_1 = self.weight
primals_3 = self.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
CVPR2022-911/PPH
|
NormedLinear
| false
| 8,978
|
[
"Apache-2.0"
] | 0
|
f066933525aaeef412b8d166ef167f00170b5428
|
https://github.com/CVPR2022-911/PPH/tree/f066933525aaeef412b8d166ef167f00170b5428
|
L2Norm
|
import torch
from torch import nn
class L2Norm(nn.Module):
def __init__(self, n_dims, scale=20.0, eps=1e-10):
"""L2 normalization layer.
Args:
n_dims (int): Number of dimensions to be normalized
            scale (float, optional): Defaults to 20.0.
eps (float, optional): Used to avoid division by zero.
Defaults to 1e-10.
"""
super(L2Norm, self).__init__()
self.n_dims = n_dims
self.weight = nn.Parameter(torch.Tensor(self.n_dims))
self.eps = eps
self.scale = scale
def forward(self, x):
"""Forward function."""
x_float = x.float()
norm = x_float.pow(2).sum(1, keepdim=True).sqrt() + self.eps
return (self.weight[None, :, None, None].float().expand_as(x_float) *
x_float / norm).type_as(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_dims': 4}]
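# Editor's note: an illustrative smoke test (not from the original repo). The module
# leaves self.weight uninitialized, so this sketch assumes the usual SSD-style init
# of filling it with `scale` before use; normalization runs over dim 1 (channels).
if __name__ == '__main__':
    _norm = L2Norm(n_dims=4)
    with torch.no_grad():
        _norm.weight.fill_(_norm.scale)
    _out = _norm(torch.rand(4, 4, 4, 4))
    assert _out.shape == (4, 4, 4, 4)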
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 4
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x3, xmask)
tmp3 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = libdevice.sqrt(tmp13)
tmp15 = 1e-10
tmp16 = tmp14 + tmp15
tmp17 = tmp2 / tmp16
tl.store(out_ptr0 + x3, tmp17, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mul_pow_sqrt_sum_0[grid(256)](primals_2,
primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
return buf0, primals_1
class L2NormNew(nn.Module):
def __init__(self, n_dims, scale=20.0, eps=1e-10):
"""L2 normalization layer.
Args:
n_dims (int): Number of dimensions to be normalized
            scale (float, optional): Defaults to 20.0.
eps (float, optional): Used to avoid division by zero.
Defaults to 1e-10.
"""
super(L2NormNew, self).__init__()
self.n_dims = n_dims
self.weight = nn.Parameter(torch.Tensor(self.n_dims))
self.eps = eps
self.scale = scale
def forward(self, input_0):
primals_2 = self.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
|
CVPR2022-911/PPH
|
L2Norm
| false
| 8,979
|
[
"Apache-2.0"
] | 0
|
f066933525aaeef412b8d166ef167f00170b5428
|
https://github.com/CVPR2022-911/PPH/tree/f066933525aaeef412b8d166ef167f00170b5428
|
FCUDown
|
import torch
from functools import partial
from torch import nn
class FCUDown(nn.Module):
""" CNN feature maps -> Transformer patch embeddings
"""
def __init__(self, inplanes, outplanes, dw_stride, act_layer=nn.GELU,
norm_layer=partial(nn.LayerNorm, eps=1e-06)):
super(FCUDown, self).__init__()
self.dw_stride = dw_stride
self.conv_project = nn.Conv2d(inplanes, outplanes, kernel_size=1,
stride=1, padding=0)
self.sample_pooling = nn.AvgPool2d(kernel_size=dw_stride, stride=
dw_stride)
self.ln = norm_layer(outplanes)
self.act = act_layer()
def forward(self, x, x_t):
x = self.conv_project(x)
x = self.sample_pooling(x).flatten(2).transpose(1, 2)
x = self.ln(x)
x = self.act(x)
x = torch.cat([x_t[:, 0][:, None, :], x], dim=1)
return x
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'inplanes': 4, 'outplanes': 4, 'dw_stride': 1}]
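# Editor's note: an illustrative smoke test (not from the original repo). get_inputs()
# feeds an unbatched (C, H, W) tensor to the 1x1 conv, which recent PyTorch versions
# accept; after pooling, flattening and transposing, the class token taken from x_t
# is prepended, giving a (4, 5, 4) patch-embedding tensor for these toy sizes.
if __name__ == '__main__':
    _down = FCUDown(inplanes=4, outplanes=4, dw_stride=1)
    _out = _down(torch.rand(4, 4, 4), torch.rand(4, 4, 4))
    assert _out.shape == (4, 5, 4)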
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from functools import partial
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_avg_pool2d_convolution_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp2, xmask)
tl.store(out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x2, tmp8, xmask)
tl.store(out_ptr1 + x2, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
x1 = xindex // 4 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr2 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 5
x0 = xindex % 4
x2 = xindex // 20
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 5, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x0 + 16 * x2 + (-1 + x1)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = 0.5
tmp11 = tmp9 * tmp10
tmp12 = 0.7071067811865476
tmp13 = tmp9 * tmp12
tmp14 = libdevice.erf(tmp13)
tmp15 = 1.0
tmp16 = tmp14 + tmp15
tmp17 = tmp11 * tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp6, tmp17, tmp18)
tmp20 = tl.where(tmp4, tmp5, tmp19)
tl.store(out_ptr0 + x3, tmp20, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
4, 4, 4), (64, 16, 4, 1), 0), primals_1, stride=(1, 1), padding
=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0,
0), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_convolution_0[grid(64)](buf1, primals_2,
buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(16)](buf2, buf3, buf4, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
triton_poi_fused_native_layer_norm_2[grid(64)](buf2, buf3, buf4,
primals_4, primals_5, buf5, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf3
del buf4
buf6 = empty_strided_cuda((4, 5, 4), (20, 4, 1), torch.float32)
triton_poi_fused_cat_3[grid(80)](primals_6, buf5, buf6, 80, XBLOCK=
128, num_warps=4, num_stages=1)
del buf5
del primals_6
return buf6, primals_1, primals_4, primals_5, reinterpret_tensor(primals_3,
(1, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf1, (4, 4, 4
), (16, 4, 1), 0), buf2
class FCUDownNew(nn.Module):
""" CNN feature maps -> Transformer patch embeddings
"""
def __init__(self, inplanes, outplanes, dw_stride, act_layer=nn.GELU,
norm_layer=partial(nn.LayerNorm, eps=1e-06)):
super(FCUDownNew, self).__init__()
self.dw_stride = dw_stride
self.conv_project = nn.Conv2d(inplanes, outplanes, kernel_size=1,
stride=1, padding=0)
self.sample_pooling = nn.AvgPool2d(kernel_size=dw_stride, stride=
dw_stride)
self.ln = norm_layer(outplanes)
self.act = act_layer()
def forward(self, input_0, input_1):
primals_1 = self.conv_project.weight
primals_2 = self.conv_project.bias
primals_4 = self.ln.weight
primals_5 = self.ln.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
CVPR2022-911/PPH
|
FCUDown
| false
| 8,980
|
[
"Apache-2.0"
] | 0
|
f066933525aaeef412b8d166ef167f00170b5428
|
https://github.com/CVPR2022-911/PPH/tree/f066933525aaeef412b8d166ef167f00170b5428
|
ChannelMixer
|
import torch
from torch import nn
import torch.nn.functional as F
import torch.multiprocessing
import torch.nn
import torch.optim
import torch.distributed
class FeedForward(nn.Module):
def __init__(self, num_features, expansion_factor, dropout):
super().__init__()
num_hidden = expansion_factor * num_features
self.fc1 = nn.Linear(num_features, num_hidden)
self.fc2 = nn.Linear(num_hidden, num_features)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
def forward(self, x):
x = self.dropout1(F.gelu(self.fc1(x)))
x = self.dropout2(self.fc2(x))
return x
class ChannelMixer(nn.Module):
def __init__(self, d_model, expansion_factor, dropout):
super().__init__()
self.norm = nn.LayerNorm(d_model)
self.mlp = FeedForward(d_model, expansion_factor, dropout)
def forward(self, x):
residual = x
x = self.norm(x)
x = self.mlp(x)
out = x + residual
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'expansion_factor': 4, 'dropout': 0.5}]
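# Editor's note: an illustrative smoke test (not from the original repo). eval() is
# used so the dropout layers are inactive; the LayerNorm + MLP + residual path keeps
# the 4x4x4x4 shape of the get_inputs() tensor.
if __name__ == '__main__':
    _mixer = ChannelMixer(d_model=4, expansion_factor=4, dropout=0.5).eval()
    _out = _mixer(torch.rand(4, 4, 4, 4))
    assert _out.shape == (4, 4, 4, 4)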
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.nn.functional as F
import torch.multiprocessing
import torch.nn
import torch.optim
import torch.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_gelu_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (4, 16), (16, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(64)](primals_1, buf0,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(256)](primals_1, buf0,
buf1, primals_2, primals_3, buf2, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf0
del buf1
del primals_2
del primals_3
buf3 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.
float32)
triton_poi_fused_gelu_2[grid(1024)](buf3, buf4, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf4, (64, 16), (16, 1), 0),
reinterpret_tensor(primals_6, (16, 4), (1, 16), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_add_3[grid(256)](buf6, primals_7, primals_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
return buf6, primals_1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0
), buf3, reinterpret_tensor(buf4, (64, 16), (16, 1), 0
), primals_6, primals_4
class FeedForward(nn.Module):
def __init__(self, num_features, expansion_factor, dropout):
super().__init__()
num_hidden = expansion_factor * num_features
self.fc1 = nn.Linear(num_features, num_hidden)
self.fc2 = nn.Linear(num_hidden, num_features)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
def forward(self, x):
x = self.dropout1(F.gelu(self.fc1(x)))
x = self.dropout2(self.fc2(x))
return x
class ChannelMixerNew(nn.Module):
def __init__(self, d_model, expansion_factor, dropout):
super().__init__()
self.norm = nn.LayerNorm(d_model)
self.mlp = FeedForward(d_model, expansion_factor, dropout)
def forward(self, input_0):
primals_2 = self.norm.weight
primals_3 = self.norm.bias
primals_4 = self.mlp.fc1.weight
primals_5 = self.mlp.fc1.bias
primals_6 = self.mlp.fc2.weight
primals_7 = self.mlp.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
Cardroid/Muskits
|
ChannelMixer
| false
| 8,981
|
[
"Apache-2.0"
] | 0
|
91708bb243bc671e48893a734aee710c356e4bd8
|
https://github.com/Cardroid/Muskits/tree/91708bb243bc671e48893a734aee710c356e4bd8
|
ResidualBlock
|
import torch
class ConvLayer(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride):
super(ConvLayer, self).__init__()
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels,
kernel_size, stride)
def forward(self, x):
out = self.reflection_pad(x)
out = self.conv2d(out)
return out
class ResidualBlock(torch.nn.Module):
"""ResidualBlock
introduced in: https://arxiv.org/abs/1512.03385
recommended architecture: http://torch.ch/blog/2016/02/04/resnets.html
"""
def __init__(self, channels):
super(ResidualBlock, self).__init__()
self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in1 = torch.nn.InstanceNorm2d(channels, affine=True)
self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in2 = torch.nn.InstanceNorm2d(channels, affine=True)
self.relu = torch.nn.ReLU()
def forward(self, x):
residual = x
out = self.relu(self.in1(self.conv1(x)))
out = self.in2(self.conv2(out))
out = out + residual
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
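# Editor's note: an illustrative smoke test (not from the original repo). Each
# ConvLayer pads by 1 via reflection before its 3x3 convolution, so the spatial size
# is preserved and the residual addition keeps the input shape unchanged.
if __name__ == '__main__':
    _block = ResidualBlock(channels=4)
    _out = _block(torch.rand(4, 4, 4, 4))
    assert _out.shape == (4, 4, 4, 4)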
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_1(in_out_ptr0,
in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp23, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0 % 4, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_reflection_pad2d_relu_3(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1,
out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex
r3 = rindex
x1 = xindex % 4
tmp0 = tl.load(in_ptr0 + x0 % 4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_out_ptr0 + (r3 + 16 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr3 + (r3 + 16 * x0), xmask, other=0.0)
tmp3 = tmp1 + tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tl.where(xmask, tmp4, 0)
tmp7 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp10 / tmp12
tmp14 = tmp4 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(xmask, tmp16, 0)
tmp19 = tl.sum(tmp18, 1)[:, None]
tmp20 = tmp3 - tmp13
tmp21 = 16.0
tmp22 = tmp19 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tmp26 = tmp20 * tmp25
tmp27 = tmp26 * tmp0
tmp29 = tmp27 + tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x0, tmp0, xmask)
tl.store(in_out_ptr0 + (r3 + 16 * x0), tmp3, xmask)
tl.store(out_ptr3 + (r3 + 16 * x0), tmp31, xmask)
tl.store(out_ptr4 + x0, tmp25, xmask)
tl.store(out_ptr1 + x0, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(576)](primals_1, buf0, 576,
XBLOCK=256, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf6 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf8 = reinterpret_tensor(buf6, (1, 16, 1, 1), (16, 1, 1, 1), 0)
del buf6
triton_per_fused__native_batch_norm_legit_convolution_1[grid(16)](buf2,
buf8, primals_3, buf5, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((16,), (1,), torch.float32)
triton_poi_fused_repeat_2[grid(16)](primals_4, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_4
buf4 = empty_strided_cuda((16,), (1,), torch.float32)
triton_poi_fused_repeat_2[grid(16)](primals_5, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf9 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
triton_poi_fused_reflection_pad2d_relu_3[grid(576)](buf2, buf5,
buf8, buf3, buf4, buf9, 576, XBLOCK=256, num_warps=4, num_stages=1)
buf10 = extern_kernels.convolution(buf9, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 4, 4, 4), (64, 16, 4, 1))
buf12 = empty_strided_cuda((16,), (1,), torch.float32)
buf11 = buf10
del buf10
buf13 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
float32)
buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf16 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
float32)
triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4[grid
(16)](buf11, primals_8, primals_7, primals_9, primals_1, buf12,
buf13, buf17, buf16, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
del primals_1
del primals_7
del primals_8
del primals_9
return (buf17, primals_2, primals_6, buf0, buf2, buf3, buf4, buf5, buf8,
buf9, buf11, buf12, reinterpret_tensor(buf16, (16,), (1,), 0),
reinterpret_tensor(buf13, (1, 16, 1, 1), (16, 1, 1, 1), 0))
class ConvLayer(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride):
super(ConvLayer, self).__init__()
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels,
kernel_size, stride)
def forward(self, x):
out = self.reflection_pad(x)
out = self.conv2d(out)
return out
class ResidualBlockNew(torch.nn.Module):
"""ResidualBlock
introduced in: https://arxiv.org/abs/1512.03385
recommended architecture: http://torch.ch/blog/2016/02/04/resnets.html
"""
def __init__(self, channels):
super(ResidualBlockNew, self).__init__()
self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in1 = torch.nn.InstanceNorm2d(channels, affine=True)
self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in2 = torch.nn.InstanceNorm2d(channels, affine=True)
self.relu = torch.nn.ReLU()
def forward(self, input_0):
primals_2 = self.conv1.conv2d.weight
primals_3 = self.conv1.conv2d.bias
primals_4 = self.in1.weight
primals_5 = self.in1.bias
primals_6 = self.conv2.conv2d.weight
primals_7 = self.conv2.conv2d.bias
primals_8 = self.in2.weight
primals_9 = self.in2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
Chandan-h-509/ignite
|
ResidualBlock
| false
| 8,982
|
[
"BSD-3-Clause"
] | 0
|
f8c39828cb1dac49b6ef358cdf77865bf2430106
|
https://github.com/Chandan-h-509/ignite/tree/f8c39828cb1dac49b6ef358cdf77865bf2430106
|
ClassHead
|
import torch
from itertools import product as product
import torch.nn as nn
class ClassHead(nn.Module):
def __init__(self, inchannels=512, num_anchors=3):
super(ClassHead, self).__init__()
self.num_anchors = num_anchors
self.conv1x1 = nn.Conv2d(inchannels, self.num_anchors * 2,
kernel_size=(1, 1), stride=1, padding=0)
def forward(self, x):
out = self.conv1x1(x)
out = out.permute(0, 2, 3, 1).contiguous()
return out.view(out.shape[0], -1, 2)
def get_inputs():
return [torch.rand([4, 512, 64, 64])]
def get_init_inputs():
return [[], {}]
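# Editor's note: an illustrative smoke test (not from the original repo). The 1x1
# conv emits num_anchors * 2 = 6 channels; after the permute and reshape, each of
# the 64 * 64 * 3 = 12288 anchors carries a 2-way class score.
if __name__ == '__main__':
    _head = ClassHead(inchannels=512, num_anchors=3)
    _scores = _head(torch.rand(4, 512, 64, 64))
    assert _scores.shape == (4, 12288, 2)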
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from itertools import product as product
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None)
@triton.jit
def triton_poi_fused_clone_view_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x4 = xindex
x0 = xindex % 6
tmp0 = tl.load(in_out_ptr0 + x4, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x4, tmp2, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (6, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_2, (6,), (1,))
assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512
), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(2048, 4096)](primals_3, buf0, 2048, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_3
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 6, 64, 64), (24576, 1, 384, 6))
buf2 = reinterpret_tensor(buf1, (4, 64, 64, 6), (24576, 384, 6, 1), 0)
del buf1
buf3 = reinterpret_tensor(buf2, (4, 12288, 2), (24576, 2, 1), 0)
del buf2
triton_poi_fused_clone_view_1[grid(98304)](buf3, primals_2, 98304,
XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
return buf3, primals_1, buf0
class ClassHeadNew(nn.Module):
def __init__(self, inchannels=512, num_anchors=3):
super(ClassHeadNew, self).__init__()
self.num_anchors = num_anchors
self.conv1x1 = nn.Conv2d(inchannels, self.num_anchors * 2,
kernel_size=(1, 1), stride=1, padding=0)
def forward(self, input_0):
primals_1 = self.conv1x1.weight
primals_2 = self.conv1x1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| BossunWang/Pytorch_Retinaface | ClassHead | false | 8,983 | ["MIT"] | 0 | 01ec6cfbcced1e8cc8802084e4e566ccaf2513a8 | https://github.com/BossunWang/Pytorch_Retinaface/tree/01ec6cfbcced1e8cc8802084e4e566ccaf2513a8 |
LandmarkHead
|
import torch
from itertools import product as product
import torch.nn as nn
class LandmarkHead(nn.Module):
def __init__(self, inchannels=512, num_anchors=3):
super(LandmarkHead, self).__init__()
self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 10, kernel_size=
(1, 1), stride=1, padding=0)
def forward(self, x):
out = self.conv1x1(x)
out = out.permute(0, 2, 3, 1).contiguous()
return out.view(out.shape[0], -1, 10)
def get_inputs():
return [torch.rand([4, 512, 64, 64])]
def get_init_inputs():
return [[], {}]
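# Editor's note -- a hedged shape sketch, analogous to the ClassHead check: this
# head predicts 10 landmark coordinates (5 x/y pairs) per anchor position.
if __name__ == '__main__':
    head = LandmarkHead(inchannels=512, num_anchors=3)
    out = head(torch.rand(2, 512, 8, 8))
    assert out.shape == (2, 8 * 8 * 3, 10)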
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from itertools import product as product
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None)
@triton.jit
def triton_poi_fused_clone_view_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x4 = xindex
x0 = xindex % 30
tmp0 = tl.load(in_out_ptr0 + x4, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x4, tmp2, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (30, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_2, (30,), (1,))
assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512
), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(2048, 4096)](primals_3, buf0, 2048, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_3
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 30, 64, 64), (122880, 1, 1920, 30))
buf2 = reinterpret_tensor(buf1, (4, 64, 64, 30), (122880, 1920, 30,
1), 0)
del buf1
buf3 = reinterpret_tensor(buf2, (4, 12288, 10), (122880, 10, 1), 0)
del buf2
triton_poi_fused_clone_view_1[grid(491520)](buf3, primals_2, 491520,
XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
return buf3, primals_1, buf0
class LandmarkHeadNew(nn.Module):
def __init__(self, inchannels=512, num_anchors=3):
super(LandmarkHeadNew, self).__init__()
self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 10, kernel_size=
(1, 1), stride=1, padding=0)
def forward(self, input_0):
primals_1 = self.conv1x1.weight
primals_2 = self.conv1x1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| BossunWang/Pytorch_Retinaface | LandmarkHead | false | 8,984 | ["MIT"] | 0 | 01ec6cfbcced1e8cc8802084e4e566ccaf2513a8 | https://github.com/BossunWang/Pytorch_Retinaface/tree/01ec6cfbcced1e8cc8802084e4e566ccaf2513a8 |
ExampleBackbone
|
import torch
import torch.nn as nn
import torch._C
import torch.serialization
class ExampleBackbone(nn.Module):
def __init__(self):
super(ExampleBackbone, self).__init__()
self.conv = nn.Conv2d(3, 3, 3)
def init_weights(self, pretrained=None):
pass
def forward(self, x):
return [self.conv(x)]
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
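# Editor's note -- hedged sketch: the backbone returns its feature maps as a list,
# and the unpadded 3x3 convolution trims one pixel from each border.
if __name__ == '__main__':
    backbone = ExampleBackbone()
    feats = backbone(torch.rand(2, 3, 64, 64))
    assert isinstance(feats, list) and feats[0].shape == (2, 3, 62, 62)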
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch._C
import torch.serialization
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 46128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3844 % 3
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (3, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (3,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 3, 62, 62), (11532, 3844, 62, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(46128)](buf1, primals_2, 46128,
XBLOCK=512, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3
class ExampleBackboneNew(nn.Module):
def __init__(self):
super(ExampleBackboneNew, self).__init__()
self.conv = nn.Conv2d(3, 3, 3)
def init_weights(self, pretrained=None):
pass
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| AnonSubmission6150/submission6150 | ExampleBackbone | false | 8,985 | ["Apache-2.0"] | 0 | 571633d9a12b4fd7a9546947787fc068966dab04 | https://github.com/AnonSubmission6150/submission6150/tree/571633d9a12b4fd7a9546947787fc068966dab04 |
NormedConv2d
|
import torch
from torch import nn
class NormedConv2d(nn.Conv2d):
"""Normalized Conv2d Layer.
Args:
        tempearture (float, optional): Temperature term. Default to 20.
        power (float, optional): Power term. Default to 1.0.
eps (float, optional): The minimal value of divisor to
keep numerical stability. Default to 1e-6.
norm_over_kernel (bool, optional): Normalize over kernel.
Default to False.
"""
def __init__(self, *args, tempearture=20, power=1.0, eps=1e-06,
norm_over_kernel=False, **kwargs):
super(NormedConv2d, self).__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.norm_over_kernel = norm_over_kernel
self.eps = eps
def forward(self, x):
if not self.norm_over_kernel:
weight_ = self.weight / (self.weight.norm(dim=1, keepdim=True).
pow(self.power) + self.eps)
else:
weight_ = self.weight / (self.weight.view(self.weight.size(0),
-1).norm(dim=1, keepdim=True).pow(self.power)[..., None,
None] + self.eps)
x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x_ * self.tempearture
if hasattr(self, 'conv2d_forward'):
x_ = self.conv2d_forward(x_, weight_)
elif torch.__version__ >= '1.8':
x_ = self._conv_forward(x_, weight_, self.bias)
else:
x_ = self._conv_forward(x_, weight_)
return x_
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
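# Editor's note -- hedged sketch of what the layer computes (assuming a recent
# PyTorch, 2.x): both the kernel and the input are L2-normalised along the
# channel dimension (norm_over_kernel=False), and the input is then scaled by
# `tempearture`, so the tensor fed to the convolution has a channel-wise norm of
# roughly 20 with the defaults.
if __name__ == '__main__':
    layer = NormedConv2d(in_channels=4, out_channels=4, kernel_size=4)
    x = torch.rand(2, 4, 8, 8)
    x_scaled = x / (x.norm(dim=1, keepdim=True) + layer.eps) * layer.tempearture
    print(x_scaled.norm(dim=1).mean())  # ~20
    print(layer(x).shape)               # (2, 4, 5, 5) for an 8x8 input and a 4x4 kernel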
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_pow_0(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-06
tmp14 = tmp12 + tmp13
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x3, tmp15, xmask)
@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_mul_pow_1(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-06
tmp14 = tmp12 + tmp13
tmp15 = tmp0 / tmp14
tmp16 = 20.0
tmp17 = tmp15 * tmp16
tl.store(out_ptr0 + x3, tmp17, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_linalg_vector_norm_pow_0[grid(256)](primals_1,
buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_linalg_vector_norm_mul_pow_1[grid(256)](
primals_2, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_2[grid(16)](buf3, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
return buf3, primals_1, buf0, buf1
class NormedConv2dNew(nn.Conv2d):
"""Normalized Conv2d Layer.
Args:
        tempearture (float, optional): Temperature term. Default to 20.
        power (float, optional): Power term. Default to 1.0.
eps (float, optional): The minimal value of divisor to
keep numerical stability. Default to 1e-6.
norm_over_kernel (bool, optional): Normalize over kernel.
Default to False.
"""
def __init__(self, *args, tempearture=20, power=1.0, eps=1e-06,
norm_over_kernel=False, **kwargs):
super(NormedConv2dNew, self).__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.norm_over_kernel = norm_over_kernel
self.eps = eps
def forward(self, input_0):
primals_1 = self.weight
primals_3 = self.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| CVPR2022-911/PPH | NormedConv2d | false | 8,986 | ["Apache-2.0"] | 0 | f066933525aaeef412b8d166ef167f00170b5428 | https://github.com/CVPR2022-911/PPH/tree/f066933525aaeef412b8d166ef167f00170b5428 |
BboxHead
|
import torch
from itertools import product as product
import torch.nn as nn
class BboxHead(nn.Module):
def __init__(self, inchannels=512, num_anchors=3):
super(BboxHead, self).__init__()
self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 4, kernel_size=(
1, 1), stride=1, padding=0)
def forward(self, x):
out = self.conv1x1(x)
out = out.permute(0, 2, 3, 1).contiguous()
return out.view(out.shape[0], -1, 4)
def get_inputs():
return [torch.rand([4, 512, 64, 64])]
def get_init_inputs():
return [[], {}]
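# Editor's note -- hedged shape sketch, mirroring the other RetinaFace heads:
# four box-regression values are produced per anchor position.
if __name__ == '__main__':
    head = BboxHead(inchannels=512, num_anchors=3)
    out = head(torch.rand(2, 512, 8, 8))
    assert out.shape == (2, 8 * 8 * 3, 4)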
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from itertools import product as product
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None)
@triton.jit
def triton_poi_fused_clone_view_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x4 = xindex
x0 = xindex % 12
tmp0 = tl.load(in_out_ptr0 + x4, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x4, tmp2, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (12, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_2, (12,), (1,))
assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512
), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(2048, 4096)](primals_3, buf0, 2048, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_3
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 12, 64, 64), (49152, 1, 768, 12))
buf2 = reinterpret_tensor(buf1, (4, 64, 64, 12), (49152, 768, 12, 1), 0
)
del buf1
buf3 = reinterpret_tensor(buf2, (4, 12288, 4), (49152, 4, 1), 0)
del buf2
triton_poi_fused_clone_view_1[grid(196608)](buf3, primals_2, 196608,
XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
return buf3, primals_1, buf0
class BboxHeadNew(nn.Module):
def __init__(self, inchannels=512, num_anchors=3):
super(BboxHeadNew, self).__init__()
self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 4, kernel_size=(
1, 1), stride=1, padding=0)
def forward(self, input_0):
primals_1 = self.conv1x1.weight
primals_2 = self.conv1x1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| BossunWang/Pytorch_Retinaface | BboxHead | false | 8,987 | ["MIT"] | 0 | 01ec6cfbcced1e8cc8802084e4e566ccaf2513a8 | https://github.com/BossunWang/Pytorch_Retinaface/tree/01ec6cfbcced1e8cc8802084e4e566ccaf2513a8 |
LayerNorm
|
import torch
import torch.nn as nn
class LayerNorm(nn.Module):
def __init__(self, num_features, eps=1e-05, affine=True):
super(LayerNorm, self).__init__()
self.num_features = num_features
self.affine = affine
self.eps = eps
if self.affine:
self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_())
self.beta = nn.Parameter(torch.zeros(num_features))
def forward(self, x):
shape = [-1] + [1] * (x.dim() - 1)
mean = x.view(x.size(0), -1).mean(1).view(*shape)
std = x.view(x.size(0), -1).std(1).view(*shape)
x = (x - mean) / (std + self.eps)
if self.affine:
shape = [1, -1] + [1] * (x.dim() - 2)
x = x * self.gamma.view(*shape) + self.beta.view(*shape)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_features': 4}]
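# Editor's note -- hedged numerical sketch: statistics are taken per sample over
# all remaining elements, and `std` is the unbiased estimator (divides by N - 1),
# which is why the generated kernel below divides the squared deviations by 63.0
# for a 4x4x4 feature block of 64 elements.
if __name__ == '__main__':
    ln = LayerNorm(num_features=4)
    x = torch.rand(4, 4, 4, 4)
    flat = x.view(4, -1)
    ref = (x - flat.mean(1).view(4, 1, 1, 1)) / (flat.std(1).view(4, 1, 1, 1) + 1e-05)
    ref = ref * ln.gamma.view(1, 4, 1, 1) + ln.beta.view(1, 4, 1, 1)
    print(torch.allclose(ln(x), ref))  # True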
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_div_mean_mul_std_sub_0(in_out_ptr0, in_out_ptr1,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp28 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 64.0
tmp20 = tmp4 / tmp19
tmp21 = 63.0
tmp22 = tmp18 / tmp21
tmp23 = libdevice.sqrt(tmp22)
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp26 = tmp0 - tmp20
tmp27 = tmp26 / tmp25
tmp29 = tmp27 * tmp28
tmp31 = tmp29 + tmp30
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp20, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp25, xmask)
tl.store(out_ptr0 + (r1 + 64 * x0), tmp31, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf3 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = buf0
del buf0
buf5 = reinterpret_tensor(buf3, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf3
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_mean_mul_std_sub_0[grid(4)](buf1, buf5,
primals_1, primals_2, primals_3, buf6, 4, 64, XBLOCK=1,
num_warps=2, num_stages=1)
del primals_2
del primals_3
return buf6, primals_1, reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1,
1), 0), buf5
class LayerNormNew(nn.Module):
def __init__(self, num_features, eps=1e-05, affine=True):
super(LayerNormNew, self).__init__()
self.num_features = num_features
self.affine = affine
self.eps = eps
if self.affine:
self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_())
self.beta = nn.Parameter(torch.zeros(num_features))
def forward(self, input_0):
primals_2 = self.gamma
primals_3 = self.beta
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| AnnanShu/gan | LayerNorm | false | 8,988 | ["MIT"] | 0 | 0c6409872ce65fe046e620fca053cff553bba9ef | https://github.com/AnnanShu/gan/tree/0c6409872ce65fe046e620fca053cff553bba9ef |
RSoftmax
|
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch._C
import torch.serialization
class RSoftmax(nn.Module):
"""Radix Softmax module in ``SplitAttentionConv2d``.
Args:
radix (int): Radix of input.
groups (int): Groups of input.
"""
def __init__(self, radix, groups):
super().__init__()
self.radix = radix
self.groups = groups
def forward(self, x):
batch = x.size(0)
if self.radix > 1:
x = x.view(batch, self.groups, self.radix, -1).transpose(1, 2)
x = F.softmax(x, dim=1)
x = x.reshape(batch, -1)
else:
x = torch.sigmoid(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'radix': 4, 'groups': 1}]
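# Editor's note -- hedged sketch: for radix > 1 the softmax runs over the radix
# axis, so reshaping the flattened output back to (batch, radix, -1) recovers
# groups that sum to one along that axis.
if __name__ == '__main__':
    rs = RSoftmax(radix=4, groups=1)
    out = rs(torch.rand(4, 4, 4, 4))
    print(out.shape)                      # torch.Size([4, 64])
    print(out.view(4, 4, -1).sum(dim=1))  # all ones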
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch._C
import torch.serialization
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 16), (64, 16, 256, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 1, 16), (64, 16, 16, 1), torch.float32
)
triton_poi_fused__softmax_1[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf0
return reinterpret_tensor(buf1, (4, 64), (64, 1), 0),
class RSoftmaxNew(nn.Module):
"""Radix Softmax module in ``SplitAttentionConv2d``.
Args:
radix (int): Radix of input.
groups (int): Groups of input.
"""
def __init__(self, radix, groups):
super().__init__()
self.radix = radix
self.groups = groups
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| AnonSubmission6150/submission6150 | RSoftmax | false | 8,989 | ["Apache-2.0"] | 0 | 571633d9a12b4fd7a9546947787fc068966dab04 | https://github.com/AnonSubmission6150/submission6150/tree/571633d9a12b4fd7a9546947787fc068966dab04 |
RMSELoss
|
import torch
from torch import Tensor
from torch import nn
class RMSELoss(nn.Module):
""" Root mean square error. """
def __init__(self, **kwargs):
super().__init__()
self.mse = nn.MSELoss(**kwargs)
def forward(self, preds: 'Tensor', target: 'Tensor') ->Tensor:
return torch.sqrt(self.mse(preds, target))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
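# Editor's note -- hedged sketch: the criterion is simply the square root of the
# mean squared error, as the one-line check below illustrates.
if __name__ == '__main__':
    crit = RMSELoss()
    preds, target = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    print(torch.allclose(crit(preds, target), torch.sqrt(((preds - target) ** 2).mean())))  # True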
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mse_loss_sqrt_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mse_loss_sqrt_0[grid(1)](buf1, arg1_1, arg0_1, 1,
256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class RMSELossNew(nn.Module):
""" Root mean square error. """
def __init__(self, **kwargs):
super().__init__()
self.mse = nn.MSELoss(**kwargs)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Connormcc3/ludwig | RMSELoss | false | 8,990 | ["Apache-2.0"] | 0 | 5d562cbc0c4fed3e607969e18611f34240eef177 | https://github.com/Connormcc3/ludwig/tree/5d562cbc0c4fed3e607969e18611f34240eef177 |
ContrastiveDistanceLoss
|
import torch
from torch import nn
from torch.nn.modules.loss import *
from torch.nn.modules import *
from torch.optim import *
from torch.optim.lr_scheduler import *
import torch.distributed
import torch.multiprocessing
import torch.backends
class ContrastiveDistanceLoss(nn.Module):
"""The Contrastive distance loss.
@TODO: Docs. Contribution is welcome.
"""
def __init__(self, margin=1.0, reduction='mean'):
"""
Args:
margin: margin parameter
reduction: criterion reduction type
"""
super().__init__()
self.margin = margin
self.reduction = reduction or 'none'
def forward(self, distance_pred, distance_true) ->torch.Tensor:
"""Forward propagation method for the contrastive loss.
Args:
distance_pred: predicted distances
distance_true: true distances
Returns:
torch.Tensor: loss
"""
bs = len(distance_true)
margin_distance = self.margin - distance_pred
margin_distance = torch.clamp(margin_distance, min=0.0)
loss = (1 - distance_true) * torch.pow(distance_pred, 2
) + distance_true * torch.pow(margin_distance, 2)
if self.reduction == 'mean':
loss = torch.sum(loss) / 2.0 / bs
elif self.reduction == 'sum':
loss = torch.sum(loss)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
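# Editor's note -- hedged numerical sketch of the formula above: similar pairs
# (distance_true ~ 0) are penalised by the squared distance, dissimilar pairs by
# the squared hinge on (margin - distance); 'mean' reduction divides by 2 * batch.
if __name__ == '__main__':
    crit = ContrastiveDistanceLoss(margin=1.0)
    d = torch.rand(4, 4, 4, 4)
    y = torch.rand(4, 4, 4, 4)
    ref = (1 - y) * d ** 2 + y * torch.clamp(1.0 - d, min=0.0) ** 2
    print(torch.allclose(crit(d, y), ref.sum() / 2.0 / len(y)))  # True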
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
from torch.nn.modules.loss import *
from torch.nn.modules import *
from torch.optim import *
from torch.optim.lr_scheduler import *
import torch.distributed
import torch.multiprocessing
import torch.backends
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_clamp_div_mul_pow_rsub_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp3 * tmp3
tmp5 = tmp2 * tmp4
tmp6 = tmp1 - tmp3
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8 * tmp8
tmp10 = tmp0 * tmp9
tmp11 = tmp5 + tmp10
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = 0.25
tmp18 = tmp16 * tmp17
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_clamp_div_mul_pow_rsub_sum_0[grid(1)](buf1,
arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class ContrastiveDistanceLossNew(nn.Module):
"""The Contrastive distance loss.
@TODO: Docs. Contribution is welcome.
"""
def __init__(self, margin=1.0, reduction='mean'):
"""
Args:
margin: margin parameter
reduction: criterion reduction type
"""
super().__init__()
self.margin = margin
self.reduction = reduction or 'none'
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Casyfill/catalyst | ContrastiveDistanceLoss | false | 8,991 | ["Apache-2.0"] | 0 | 7f63545dbc53902c3dd959463def28a67a16a989 | https://github.com/Casyfill/catalyst/tree/7f63545dbc53902c3dd959463def28a67a16a989 |
SpatialGatherModule
|
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch._C
import torch.serialization
class SpatialGatherModule(nn.Module):
"""Aggregate the context features according to the initial predicted
probability distribution.
Employ the soft-weighted method to aggregate the context.
"""
def __init__(self, scale):
super(SpatialGatherModule, self).__init__()
self.scale = scale
def forward(self, feats, probs):
"""Forward function."""
batch_size, num_classes, _height, _width = probs.size()
channels = feats.size(1)
probs = probs.view(batch_size, num_classes, -1)
feats = feats.view(batch_size, channels, -1)
feats = feats.permute(0, 2, 1)
probs = F.softmax(self.scale * probs, dim=2)
ocr_context = torch.matmul(probs, feats)
ocr_context = ocr_context.permute(0, 2, 1).contiguous().unsqueeze(3)
return ocr_context
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'scale': 1.0}]
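# Editor's note -- hedged sketch: each of the K predicted classes gathers a
# softmax-weighted average of the pixel features, giving one context vector per
# class; the einsum below reproduces the matmul formulation used above.
if __name__ == '__main__':
    gather = SpatialGatherModule(scale=1.0)
    feats, probs = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    ctx = gather(feats, probs)
    w = torch.softmax(probs.view(4, 4, -1), dim=2)
    ref = torch.einsum('bkp,bcp->bck', w, feats.view(4, 4, -1)).unsqueeze(3)
    print(ctx.shape, torch.allclose(ctx, ref, atol=1e-6))  # torch.Size([4, 4, 4, 1]) True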
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch._C
import torch.serialization
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__softmax_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, float('-inf'))
tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
tmp7 = tmp2 - tmp6
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.where(xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tmp14 = tmp9 / tmp13
tl.store(out_ptr2 + (r1 + 16 * x0), tmp14, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__softmax_0[grid(16)](arg0_1, buf2, 16, 16, XBLOCK=
1, num_warps=2, num_stages=1)
del arg0_1
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf2, reinterpret_tensor(arg1_1, (4, 16, 4), (64,
1, 16), 0), out=buf3)
del arg1_1
del buf2
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_1[grid(16, 4)](buf3, buf4, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
del buf3
return reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0),
class SpatialGatherModuleNew(nn.Module):
"""Aggregate the context features according to the initial predicted
probability distribution.
Employ the soft-weighted method to aggregate the context.
"""
def __init__(self, scale):
super(SpatialGatherModuleNew, self).__init__()
self.scale = scale
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| AnonSubmission6150/submission6150 | SpatialGatherModule | false | 8,992 | ["Apache-2.0"] | 0 | 571633d9a12b4fd7a9546947787fc068966dab04 | https://github.com/AnonSubmission6150/submission6150/tree/571633d9a12b4fd7a9546947787fc068966dab04 |
DiceLoss
|
import functools
import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
import torch._C
import torch.serialization
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
assert weight.dim() == loss.dim()
if weight.dim() > 1:
assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
loss = loss * weight
if avg_factor is None:
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
loss = loss.sum() / avg_factor
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def get_class_weight(class_weight):
"""Get class weight for loss function.
Args:
class_weight (list[float] | str | None): If class_weight is a str,
take it as a file name and read from it.
"""
if isinstance(class_weight, str):
if class_weight.endswith('.npy'):
class_weight = np.load(class_weight)
        else:
            # mmcv is only needed for this branch (json/yaml/pickle files), so it
            # is imported lazily; the surrounding module does not import it.
            import mmcv
            class_weight = mmcv.load(class_weight)
return class_weight
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, avg_factor=2)
tensor(1.5000)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction='mean', avg_factor=
None, **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
@weighted_loss
def binary_dice_loss(pred, target, valid_mask, smooth=1, exponent=2, **kwards):
assert pred.shape[0] == target.shape[0]
pred = pred.reshape(pred.shape[0], -1)
target = target.reshape(target.shape[0], -1)
valid_mask = valid_mask.reshape(valid_mask.shape[0], -1)
num = torch.sum(torch.mul(pred, target) * valid_mask, dim=1) * 2 + smooth
den = torch.sum(pred.pow(exponent) + target.pow(exponent), dim=1) + smooth
return 1 - num / den
@weighted_loss
def dice_loss(pred, target, valid_mask, smooth=1, exponent=2, class_weight=
None, ignore_index=255):
assert pred.shape[0] == target.shape[0]
total_loss = 0
num_classes = pred.shape[1]
for i in range(num_classes):
if i != ignore_index:
dice_loss = binary_dice_loss(pred[:, i], target[..., i],
valid_mask=valid_mask, smooth=smooth, exponent=exponent)
if class_weight is not None:
dice_loss *= class_weight[i]
total_loss += dice_loss
return total_loss / num_classes
class DiceLoss(nn.Module):
"""DiceLoss.
This loss is proposed in `V-Net: Fully Convolutional Neural Networks for
Volumetric Medical Image Segmentation <https://arxiv.org/abs/1606.04797>`_.
Args:
loss_type (str, optional): Binary or multi-class loss.
Default: 'multi_class'. Options are "binary" and "multi_class".
smooth (float): A float number to smooth loss, and avoid NaN error.
Default: 1
        exponent (float): A float number to calculate the denominator
value: \\sum{x^exponent} + \\sum{y^exponent}. Default: 2.
reduction (str, optional): The method used to reduce the loss. Options
are "none", "mean" and "sum". This parameter only works when
per_image is True. Default: 'mean'.
class_weight (list[float] | str, optional): Weight of each class. If in
str format, read them from a file. Defaults to None.
loss_weight (float, optional): Weight of the loss. Default to 1.0.
ignore_index (int | None): The label index to be ignored. Default: 255.
"""
def __init__(self, smooth=1, exponent=2, reduction='mean', class_weight
=None, loss_weight=1.0, ignore_index=255, **kwards):
super(DiceLoss, self).__init__()
self.smooth = smooth
self.exponent = exponent
self.reduction = reduction
self.class_weight = get_class_weight(class_weight)
self.loss_weight = loss_weight
self.ignore_index = ignore_index
def forward(self, pred, target, avg_factor=None, reduction_override=
None, **kwards):
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (reduction_override if reduction_override else self.
reduction)
if self.class_weight is not None:
class_weight = pred.new_tensor(self.class_weight)
else:
class_weight = None
pred = F.softmax(pred, dim=1)
num_classes = pred.shape[1]
one_hot_target = F.one_hot(torch.clamp(target.long(), 0,
num_classes - 1), num_classes=num_classes)
valid_mask = (target != self.ignore_index).long()
loss = self.loss_weight * dice_loss(pred, one_hot_target,
valid_mask=valid_mask, reduction=reduction, avg_factor=
avg_factor, smooth=self.smooth, exponent=self.exponent,
class_weight=class_weight, ignore_index=self.ignore_index)
return loss
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
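# Editor's note -- hedged sketch of the per-class building block used above: with
# the default smooth=1, perfect agreement gives exactly 1 - (2*1+1)/(1+1+1) = 0,
# while complete disagreement on this toy example gives 1 - 1/5 = 0.8.
if __name__ == '__main__':
    eye, ones = torch.eye(4), torch.ones(4)
    print(binary_dice_loss(eye, eye, valid_mask=ones, reduction='none'))      # zeros
    print(binary_dice_loss(eye, 1 - eye, valid_mask=ones, reduction='none'))  # all 0.8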
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import functools
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
import torch._C
import torch.serialization
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_per_fused__to_copy_add_div_mean_mul_ne_pow_rsub_sum_view_2(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp42 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp71 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp112 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last'
)
tmp153 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last'
)
tmp2 = tmp1.to(tl.int64)
tmp3 = tl.full([1, 1], 0, tl.int64)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = tl.full([1, 1], 3, tl.int64)
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = tmp6 == tmp3
tmp8 = tmp7.to(tl.int64)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp0 * tmp9
tmp11 = 255.0
tmp12 = tmp1 != tmp11
tmp13 = tmp12.to(tl.int64)
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp10 * tmp14
tmp17 = tmp16.to(tl.int64)
tmp18 = triton_helpers.maximum(tmp17, tmp3)
tmp19 = triton_helpers.minimum(tmp18, tmp5)
tmp20 = tmp19 == tmp3
tmp21 = tmp20.to(tl.int64)
tmp22 = tmp21.to(tl.float32)
tmp23 = tmp0 * tmp22
tmp24 = tmp16 != tmp11
tmp25 = tmp24.to(tl.int64)
tmp26 = tmp25.to(tl.float32)
tmp27 = tmp23 * tmp26
tmp28 = tmp15 + tmp27
tmp30 = tmp29.to(tl.int64)
tmp31 = triton_helpers.maximum(tmp30, tmp3)
tmp32 = triton_helpers.minimum(tmp31, tmp5)
tmp33 = tmp32 == tmp3
tmp34 = tmp33.to(tl.int64)
tmp35 = tmp34.to(tl.float32)
tmp36 = tmp0 * tmp35
tmp37 = tmp29 != tmp11
tmp38 = tmp37.to(tl.int64)
tmp39 = tmp38.to(tl.float32)
tmp40 = tmp36 * tmp39
tmp41 = tmp28 + tmp40
tmp43 = tmp42.to(tl.int64)
tmp44 = triton_helpers.maximum(tmp43, tmp3)
tmp45 = triton_helpers.minimum(tmp44, tmp5)
tmp46 = tmp45 == tmp3
tmp47 = tmp46.to(tl.int64)
tmp48 = tmp47.to(tl.float32)
tmp49 = tmp0 * tmp48
tmp50 = tmp42 != tmp11
tmp51 = tmp50.to(tl.int64)
tmp52 = tmp51.to(tl.float32)
tmp53 = tmp49 * tmp52
tmp54 = tmp41 + tmp53
tmp55 = tmp0 * tmp0
tmp56 = tmp8 * tmp8
tmp57 = tmp56.to(tl.float32)
tmp58 = tmp55 + tmp57
tmp59 = tmp21 * tmp21
tmp60 = tmp59.to(tl.float32)
tmp61 = tmp55 + tmp60
tmp62 = tmp58 + tmp61
tmp63 = tmp34 * tmp34
tmp64 = tmp63.to(tl.float32)
tmp65 = tmp55 + tmp64
tmp66 = tmp62 + tmp65
tmp67 = tmp47 * tmp47
tmp68 = tmp67.to(tl.float32)
tmp69 = tmp55 + tmp68
tmp70 = tmp66 + tmp69
tmp72 = tl.full([1, 1], 1, tl.int64)
tmp73 = tmp6 == tmp72
tmp74 = tmp73.to(tl.int64)
tmp75 = tmp74.to(tl.float32)
tmp76 = tmp71 * tmp75
tmp77 = tmp76 * tmp14
tmp78 = tmp19 == tmp72
tmp79 = tmp78.to(tl.int64)
tmp80 = tmp79.to(tl.float32)
tmp81 = tmp71 * tmp80
tmp82 = tmp81 * tmp26
tmp83 = tmp77 + tmp82
tmp84 = tmp32 == tmp72
tmp85 = tmp84.to(tl.int64)
tmp86 = tmp85.to(tl.float32)
tmp87 = tmp71 * tmp86
tmp88 = tmp87 * tmp39
tmp89 = tmp83 + tmp88
tmp90 = tmp45 == tmp72
tmp91 = tmp90.to(tl.int64)
tmp92 = tmp91.to(tl.float32)
tmp93 = tmp71 * tmp92
tmp94 = tmp93 * tmp52
tmp95 = tmp89 + tmp94
tmp96 = tmp71 * tmp71
tmp97 = tmp74 * tmp74
tmp98 = tmp97.to(tl.float32)
tmp99 = tmp96 + tmp98
tmp100 = tmp79 * tmp79
tmp101 = tmp100.to(tl.float32)
tmp102 = tmp96 + tmp101
tmp103 = tmp99 + tmp102
tmp104 = tmp85 * tmp85
tmp105 = tmp104.to(tl.float32)
tmp106 = tmp96 + tmp105
tmp107 = tmp103 + tmp106
tmp108 = tmp91 * tmp91
tmp109 = tmp108.to(tl.float32)
tmp110 = tmp96 + tmp109
tmp111 = tmp107 + tmp110
tmp113 = tl.full([1, 1], 2, tl.int64)
tmp114 = tmp6 == tmp113
tmp115 = tmp114.to(tl.int64)
tmp116 = tmp115.to(tl.float32)
tmp117 = tmp112 * tmp116
tmp118 = tmp117 * tmp14
tmp119 = tmp19 == tmp113
tmp120 = tmp119.to(tl.int64)
tmp121 = tmp120.to(tl.float32)
tmp122 = tmp112 * tmp121
tmp123 = tmp122 * tmp26
tmp124 = tmp118 + tmp123
tmp125 = tmp32 == tmp113
tmp126 = tmp125.to(tl.int64)
tmp127 = tmp126.to(tl.float32)
tmp128 = tmp112 * tmp127
tmp129 = tmp128 * tmp39
tmp130 = tmp124 + tmp129
tmp131 = tmp45 == tmp113
tmp132 = tmp131.to(tl.int64)
tmp133 = tmp132.to(tl.float32)
tmp134 = tmp112 * tmp133
tmp135 = tmp134 * tmp52
tmp136 = tmp130 + tmp135
tmp137 = tmp112 * tmp112
tmp138 = tmp115 * tmp115
tmp139 = tmp138.to(tl.float32)
tmp140 = tmp137 + tmp139
tmp141 = tmp120 * tmp120
tmp142 = tmp141.to(tl.float32)
tmp143 = tmp137 + tmp142
tmp144 = tmp140 + tmp143
tmp145 = tmp126 * tmp126
tmp146 = tmp145.to(tl.float32)
tmp147 = tmp137 + tmp146
tmp148 = tmp144 + tmp147
tmp149 = tmp132 * tmp132
tmp150 = tmp149.to(tl.float32)
tmp151 = tmp137 + tmp150
tmp152 = tmp148 + tmp151
tmp154 = tmp6 == tmp5
tmp155 = tmp154.to(tl.int64)
tmp156 = tmp155.to(tl.float32)
tmp157 = tmp153 * tmp156
tmp158 = tmp157 * tmp14
tmp159 = tmp19 == tmp5
tmp160 = tmp159.to(tl.int64)
tmp161 = tmp160.to(tl.float32)
tmp162 = tmp153 * tmp161
tmp163 = tmp162 * tmp26
tmp164 = tmp158 + tmp163
tmp165 = tmp32 == tmp5
tmp166 = tmp165.to(tl.int64)
tmp167 = tmp166.to(tl.float32)
tmp168 = tmp153 * tmp167
tmp169 = tmp168 * tmp39
tmp170 = tmp164 + tmp169
tmp171 = tmp45 == tmp5
tmp172 = tmp171.to(tl.int64)
tmp173 = tmp172.to(tl.float32)
tmp174 = tmp153 * tmp173
tmp175 = tmp174 * tmp52
tmp176 = tmp170 + tmp175
tmp177 = tmp153 * tmp153
tmp178 = tmp155 * tmp155
tmp179 = tmp178.to(tl.float32)
tmp180 = tmp177 + tmp179
tmp181 = tmp160 * tmp160
tmp182 = tmp181.to(tl.float32)
tmp183 = tmp177 + tmp182
tmp184 = tmp180 + tmp183
tmp185 = tmp166 * tmp166
tmp186 = tmp185.to(tl.float32)
tmp187 = tmp177 + tmp186
tmp188 = tmp184 + tmp187
tmp189 = tmp172 * tmp172
tmp190 = tmp189.to(tl.float32)
tmp191 = tmp177 + tmp190
tmp192 = tmp188 + tmp191
tmp193 = 2.0
tmp194 = tmp54 * tmp193
tmp195 = 1.0
tmp196 = tmp194 + tmp195
tmp197 = tmp70 + tmp195
tmp198 = tmp196 / tmp197
tmp199 = tmp195 - tmp198
tmp200 = tl.broadcast_to(tmp199, [XBLOCK, RBLOCK])
tmp202 = tl.sum(tmp200, 1)[:, None]
tmp203 = tmp95 * tmp193
tmp204 = tmp203 + tmp195
tmp205 = tmp111 + tmp195
tmp206 = tmp204 / tmp205
tmp207 = tmp195 - tmp206
tmp208 = tl.broadcast_to(tmp207, [XBLOCK, RBLOCK])
tmp210 = tl.sum(tmp208, 1)[:, None]
tmp211 = tmp136 * tmp193
tmp212 = tmp211 + tmp195
tmp213 = tmp152 + tmp195
tmp214 = tmp212 / tmp213
tmp215 = tmp195 - tmp214
tmp216 = tl.broadcast_to(tmp215, [XBLOCK, RBLOCK])
tmp218 = tl.sum(tmp216, 1)[:, None]
tmp219 = tmp176 * tmp193
tmp220 = tmp219 + tmp195
tmp221 = tmp192 + tmp195
tmp222 = tmp220 / tmp221
tmp223 = tmp195 - tmp222
tmp224 = tl.broadcast_to(tmp223, [XBLOCK, RBLOCK])
tmp226 = tl.sum(tmp224, 1)[:, None]
tmp227 = 4.0
tmp228 = tmp202 / tmp227
tmp229 = 0.0
tmp230 = tmp228 + tmp229
tmp231 = tmp210 / tmp227
tmp232 = tmp230 + tmp231
tmp233 = tmp218 / tmp227
tmp234 = tmp232 + tmp233
tmp235 = tmp226 / tmp227
tmp236 = tmp234 + tmp235
tmp237 = 0.25
tmp238 = tmp236 * tmp237
tmp239 = tmp238 / tmp195
tmp240 = tmp239 * tmp195
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp240, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(16)](buf0, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf0
buf10 = empty_strided_cuda((), (), torch.float32)
buf14 = buf10
del buf10
triton_per_fused__to_copy_add_div_mean_mul_ne_pow_rsub_sum_view_2[grid
(1)](buf14, buf1, arg1_1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1
)
del arg1_1
del buf1
return buf14,
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
assert weight.dim() == loss.dim()
if weight.dim() > 1:
assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
loss = loss * weight
if avg_factor is None:
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
loss = loss.sum() / avg_factor
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def get_class_weight(class_weight):
"""Get class weight for loss function.
Args:
class_weight (list[float] | str | None): If class_weight is a str,
take it as a file name and read from it.
"""
if isinstance(class_weight, str):
if class_weight.endswith('.npy'):
class_weight = np.load(class_weight)
        else:
            # mmcv is only needed for this branch (json/yaml/pickle files), so it
            # is imported lazily; the surrounding module does not import it.
            import mmcv
            class_weight = mmcv.load(class_weight)
return class_weight
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, avg_factor=2)
tensor(1.5000)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction='mean', avg_factor=
None, **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
@weighted_loss
def binary_dice_loss(pred, target, valid_mask, smooth=1, exponent=2, **kwards):
assert pred.shape[0] == target.shape[0]
pred = pred.reshape(pred.shape[0], -1)
target = target.reshape(target.shape[0], -1)
valid_mask = valid_mask.reshape(valid_mask.shape[0], -1)
num = torch.sum(torch.mul(pred, target) * valid_mask, dim=1) * 2 + smooth
den = torch.sum(pred.pow(exponent) + target.pow(exponent), dim=1) + smooth
return 1 - num / den
@weighted_loss
def dice_loss(pred, target, valid_mask, smooth=1, exponent=2, class_weight=
None, ignore_index=255):
assert pred.shape[0] == target.shape[0]
total_loss = 0
num_classes = pred.shape[1]
for i in range(num_classes):
if i != ignore_index:
dice_loss = binary_dice_loss(pred[:, i], target[..., i],
valid_mask=valid_mask, smooth=smooth, exponent=exponent)
if class_weight is not None:
dice_loss *= class_weight[i]
total_loss += dice_loss
return total_loss / num_classes
class DiceLossNew(nn.Module):
"""DiceLoss.
This loss is proposed in `V-Net: Fully Convolutional Neural Networks for
Volumetric Medical Image Segmentation <https://arxiv.org/abs/1606.04797>`_.
Args:
loss_type (str, optional): Binary or multi-class loss.
Default: 'multi_class'. Options are "binary" and "multi_class".
smooth (float): A float number to smooth loss, and avoid NaN error.
Default: 1
        exponent (float): A float number to calculate denominator
value: \\sum{x^exponent} + \\sum{y^exponent}. Default: 2.
reduction (str, optional): The method used to reduce the loss. Options
are "none", "mean" and "sum". This parameter only works when
per_image is True. Default: 'mean'.
class_weight (list[float] | str, optional): Weight of each class. If in
str format, read them from a file. Defaults to None.
loss_weight (float, optional): Weight of the loss. Default to 1.0.
ignore_index (int | None): The label index to be ignored. Default: 255.
"""
def __init__(self, smooth=1, exponent=2, reduction='mean', class_weight
    =None, loss_weight=1.0, ignore_index=255, **kwargs):
super(DiceLossNew, self).__init__()
self.smooth = smooth
self.exponent = exponent
self.reduction = reduction
self.class_weight = get_class_weight(class_weight)
self.loss_weight = loss_weight
self.ignore_index = ignore_index
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
AnonSubmission6150/submission6150
|
DiceLoss
| false
| 8,993
|
[
"Apache-2.0"
] | 0
|
571633d9a12b4fd7a9546947787fc068966dab04
|
https://github.com/AnonSubmission6150/submission6150/tree/571633d9a12b4fd7a9546947787fc068966dab04
|
CrossEntropyLoss
|
import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
import torch._C
import torch.serialization
def _expand_onehot_labels(labels, label_weights, target_shape, ignore_index):
"""Expand onehot labels to match the size of prediction."""
bin_labels = labels.new_zeros(target_shape)
valid_mask = (labels >= 0) & (labels != ignore_index)
inds = torch.nonzero(valid_mask, as_tuple=True)
if inds[0].numel() > 0:
if labels.dim() == 3:
bin_labels[inds[0], labels[valid_mask], inds[1], inds[2]] = 1
else:
bin_labels[inds[0], labels[valid_mask]] = 1
valid_mask = valid_mask.unsqueeze(1).expand(target_shape).float()
if label_weights is None:
bin_label_weights = valid_mask
else:
bin_label_weights = label_weights.unsqueeze(1).expand(target_shape)
bin_label_weights *= valid_mask
return bin_labels, bin_label_weights
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
assert weight.dim() == loss.dim()
if weight.dim() > 1:
assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
loss = loss * weight
if avg_factor is None:
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
loss = loss.sum() / avg_factor
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def binary_cross_entropy(pred, label, weight=None, reduction='mean',
avg_factor=None, class_weight=None, ignore_index=255):
"""Calculate the binary CrossEntropy loss.
Args:
pred (torch.Tensor): The prediction with shape (N, 1).
label (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
reduction (str, optional): The method used to reduce the loss.
Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
ignore_index (int | None): The label index to be ignored. Default: 255
Returns:
torch.Tensor: The calculated loss
"""
if pred.dim() != label.dim():
assert pred.dim() == 2 and label.dim() == 1 or pred.dim(
) == 4 and label.dim(
) == 3, 'Only pred shape [N, C], label shape [N] or pred shape [N, C, H, W], label shape [N, H, W] are supported'
label, weight = _expand_onehot_labels(label, weight, pred.shape,
ignore_index)
if weight is not None:
weight = weight.float()
loss = F.binary_cross_entropy_with_logits(pred, label.float(),
pos_weight=class_weight, reduction='none')
loss = weight_reduce_loss(loss, weight, reduction=reduction, avg_factor
=avg_factor)
return loss
def cross_entropy(pred, label, weight=None, class_weight=None, reduction=
'mean', avg_factor=None, ignore_index=-100):
"""The wrapper function for :func:`F.cross_entropy`"""
loss = F.cross_entropy(pred, label, weight=class_weight, reduction=
'none', ignore_index=ignore_index)
if weight is not None:
weight = weight.float()
loss = weight_reduce_loss(loss, weight=weight, reduction=reduction,
avg_factor=avg_factor)
return loss
def get_class_weight(class_weight):
"""Get class weight for loss function.
Args:
class_weight (list[float] | str | None): If class_weight is a str,
take it as a file name and read from it.
"""
if isinstance(class_weight, str):
if class_weight.endswith('.npy'):
class_weight = np.load(class_weight)
else:
class_weight = mmcv.load(class_weight)
return class_weight
def mask_cross_entropy(pred, target, label, reduction='mean', avg_factor=
None, class_weight=None, ignore_index=None):
"""Calculate the CrossEntropy loss for masks.
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the number
of classes.
target (torch.Tensor): The learning label of the prediction.
        label (torch.Tensor): ``label`` indicates the class label of the mask's
            corresponding object. This will be used to select the mask of the
            class which the object belongs to when the mask prediction is not
            class-agnostic.
reduction (str, optional): The method used to reduce the loss.
Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
ignore_index (None): Placeholder, to be consistent with other loss.
Default: None.
Returns:
torch.Tensor: The calculated loss
"""
assert ignore_index is None, 'BCE loss does not support ignore_index'
assert reduction == 'mean' and avg_factor is None
num_rois = pred.size()[0]
inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
pred_slice = pred[inds, label].squeeze(1)
return F.binary_cross_entropy_with_logits(pred_slice, target, weight=
class_weight, reduction='mean')[None]
class CrossEntropyLoss(nn.Module):
"""CrossEntropyLoss.
Args:
use_sigmoid (bool, optional): Whether the prediction uses sigmoid
            instead of softmax. Defaults to False.
use_mask (bool, optional): Whether to use mask cross entropy loss.
Defaults to False.
        reduction (str, optional): The method used to reduce the loss. Defaults to 'mean'.
Options are "none", "mean" and "sum".
class_weight (list[float] | str, optional): Weight of each class. If in
str format, read them from a file. Defaults to None.
loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
"""
def __init__(self, use_sigmoid=False, use_mask=False, reduction='mean',
class_weight=None, loss_weight=1.0):
super(CrossEntropyLoss, self).__init__()
assert use_sigmoid is False or use_mask is False
self.use_sigmoid = use_sigmoid
self.use_mask = use_mask
self.reduction = reduction
self.loss_weight = loss_weight
self.class_weight = get_class_weight(class_weight)
if self.use_sigmoid:
self.cls_criterion = binary_cross_entropy
elif self.use_mask:
self.cls_criterion = mask_cross_entropy
else:
self.cls_criterion = cross_entropy
def forward(self, cls_score, label, weight=None, avg_factor=None,
reduction_override=None, **kwargs):
"""Forward function."""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (reduction_override if reduction_override else self.
reduction)
if self.class_weight is not None:
class_weight = cls_score.new_tensor(self.class_weight)
else:
class_weight = None
loss_cls = self.loss_weight * self.cls_criterion(cls_score, label,
weight, class_weight=class_weight, reduction=reduction,
avg_factor=avg_factor, **kwargs)
return loss_cls
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
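# Illustrative usage sketch (not part of the original module): calling the
# eager CrossEntropyLoss wrapper with hard integer labels. The helper name and
# the toy shapes below are assumptions made for this example.
def _cross_entropy_loss_example():
    criterion = CrossEntropyLoss(use_sigmoid=False, reduction='mean')
    logits = torch.randn(2, 3, 8, 8)              # (N, C, H, W)
    labels = torch.randint(0, 3, (2, 8, 8))       # (N, H, W)
    return criterion(logits, labels)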
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
import torch._C
import torch.serialization
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp8 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp13 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp16 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp20 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp24 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp1 = tl_math.exp(tmp0)
tmp3 = tl_math.exp(tmp2)
tmp4 = tmp1 + tmp3
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp4 + tmp6
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tl_math.log(tmp10)
tmp12 = tmp0 - tmp11
tmp14 = tmp12 * tmp13
tmp15 = tmp2 - tmp11
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp19 = tmp5 - tmp11
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp8 - tmp11
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = -tmp26
tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK])
tmp30 = tl.sum(tmp28, 1)[:, None]
tmp31 = 64.0
tmp32 = tmp30 / tmp31
tmp33 = 1.0
tmp34 = tmp32 * tmp33
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp34, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused__log_softmax_mean_mul_neg_sum_1[grid(1)](buf2,
buf0, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg1_1
del buf0
return buf2,
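# Illustrative reference (not part of the generated code): an eager-mode
# equivalent of what the two fused kernels above compute for the fixed
# (4, 4, 4, 4) inputs, i.e. a soft-target cross entropy averaged over the
# 4 * 4 * 4 = 64 positions. The helper name is an assumption for this sketch.
def _call_eager_reference(pred, target):
    log_prob = torch.log_softmax(pred, dim=1)     # kernel 0 plus the log-sum-exp
    per_position = -(target * log_prob).sum(dim=1)
    return per_position.mean()                    # divide by 64, loss_weight = 1.0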
def _expand_onehot_labels(labels, label_weights, target_shape, ignore_index):
"""Expand onehot labels to match the size of prediction."""
bin_labels = labels.new_zeros(target_shape)
valid_mask = (labels >= 0) & (labels != ignore_index)
inds = torch.nonzero(valid_mask, as_tuple=True)
if inds[0].numel() > 0:
if labels.dim() == 3:
bin_labels[inds[0], labels[valid_mask], inds[1], inds[2]] = 1
else:
bin_labels[inds[0], labels[valid_mask]] = 1
valid_mask = valid_mask.unsqueeze(1).expand(target_shape).float()
if label_weights is None:
bin_label_weights = valid_mask
else:
bin_label_weights = label_weights.unsqueeze(1).expand(target_shape)
bin_label_weights *= valid_mask
return bin_labels, bin_label_weights
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
assert weight.dim() == loss.dim()
if weight.dim() > 1:
assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
loss = loss * weight
if avg_factor is None:
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
loss = loss.sum() / avg_factor
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def binary_cross_entropy(pred, label, weight=None, reduction='mean',
avg_factor=None, class_weight=None, ignore_index=255):
"""Calculate the binary CrossEntropy loss.
Args:
pred (torch.Tensor): The prediction with shape (N, 1).
label (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
reduction (str, optional): The method used to reduce the loss.
Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
ignore_index (int | None): The label index to be ignored. Default: 255
Returns:
torch.Tensor: The calculated loss
"""
if pred.dim() != label.dim():
assert pred.dim() == 2 and label.dim() == 1 or pred.dim(
) == 4 and label.dim(
) == 3, 'Only pred shape [N, C], label shape [N] or pred shape [N, C, H, W], label shape [N, H, W] are supported'
label, weight = _expand_onehot_labels(label, weight, pred.shape,
ignore_index)
if weight is not None:
weight = weight.float()
loss = F.binary_cross_entropy_with_logits(pred, label.float(),
pos_weight=class_weight, reduction='none')
loss = weight_reduce_loss(loss, weight, reduction=reduction, avg_factor
=avg_factor)
return loss
def cross_entropy(pred, label, weight=None, class_weight=None, reduction=
'mean', avg_factor=None, ignore_index=-100):
"""The wrapper function for :func:`F.cross_entropy`"""
loss = F.cross_entropy(pred, label, weight=class_weight, reduction=
'none', ignore_index=ignore_index)
if weight is not None:
weight = weight.float()
loss = weight_reduce_loss(loss, weight=weight, reduction=reduction,
avg_factor=avg_factor)
return loss
def get_class_weight(class_weight):
"""Get class weight for loss function.
Args:
class_weight (list[float] | str | None): If class_weight is a str,
take it as a file name and read from it.
"""
if isinstance(class_weight, str):
if class_weight.endswith('.npy'):
class_weight = np.load(class_weight)
else:
class_weight = mmcv.load(class_weight)
return class_weight
def mask_cross_entropy(pred, target, label, reduction='mean', avg_factor=
None, class_weight=None, ignore_index=None):
"""Calculate the CrossEntropy loss for masks.
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the number
of classes.
target (torch.Tensor): The learning label of the prediction.
        label (torch.Tensor): ``label`` indicates the class label of the mask's
            corresponding object. This will be used to select the mask of the
            class which the object belongs to when the mask prediction is not
            class-agnostic.
reduction (str, optional): The method used to reduce the loss.
Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
ignore_index (None): Placeholder, to be consistent with other loss.
Default: None.
Returns:
torch.Tensor: The calculated loss
"""
assert ignore_index is None, 'BCE loss does not support ignore_index'
assert reduction == 'mean' and avg_factor is None
num_rois = pred.size()[0]
inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
pred_slice = pred[inds, label].squeeze(1)
return F.binary_cross_entropy_with_logits(pred_slice, target, weight=
class_weight, reduction='mean')[None]
class CrossEntropyLossNew(nn.Module):
"""CrossEntropyLoss.
Args:
use_sigmoid (bool, optional): Whether the prediction uses sigmoid
            instead of softmax. Defaults to False.
use_mask (bool, optional): Whether to use mask cross entropy loss.
Defaults to False.
        reduction (str, optional): The method used to reduce the loss. Defaults to 'mean'.
Options are "none", "mean" and "sum".
class_weight (list[float] | str, optional): Weight of each class. If in
str format, read them from a file. Defaults to None.
loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
"""
def __init__(self, use_sigmoid=False, use_mask=False, reduction='mean',
class_weight=None, loss_weight=1.0):
super(CrossEntropyLossNew, self).__init__()
assert use_sigmoid is False or use_mask is False
self.use_sigmoid = use_sigmoid
self.use_mask = use_mask
self.reduction = reduction
self.loss_weight = loss_weight
self.class_weight = get_class_weight(class_weight)
if self.use_sigmoid:
self.cls_criterion = binary_cross_entropy
elif self.use_mask:
self.cls_criterion = mask_cross_entropy
else:
self.cls_criterion = cross_entropy
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
AnonSubmission6150/submission6150
|
CrossEntropyLoss
| false
| 8,994
|
[
"Apache-2.0"
] | 0
|
571633d9a12b4fd7a9546947787fc068966dab04
|
https://github.com/AnonSubmission6150/submission6150/tree/571633d9a12b4fd7a9546947787fc068966dab04
|
PTLogreg
|
import torch
import torch.nn as nn
class PTLogreg(nn.Module):
def __init__(self, D, C):
"""Arguments:
- D: dimensions of each datapoint
- C: number of classes
"""
super(PTLogreg, self).__init__()
self.W = torch.nn.Parameter(torch.zeros(D, C))
self.b = torch.nn.Parameter(torch.zeros(C))
def forward(self, X):
return nn.functional.softmax(torch.mm(X, self.W) + self.b)
def get_loss(self, X, Yoh_):
Y = self.forward(X)
return -torch.mean(torch.sum(Yoh_ * torch.log(Y) + (1 - Yoh_) *
torch.log(1 - Y)))
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'D': 4, 'C': 4}]
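# Illustrative usage sketch (not part of the original module): a forward pass
# and loss evaluation of PTLogreg on random data; Yoh_ is a one-hot target
# matrix. The helper name and toy sizes are assumptions made for this example.
def _ptlogreg_example():
    model = PTLogreg(D=4, C=3)
    X = torch.rand(8, 4)
    Yoh_ = nn.functional.one_hot(torch.randint(0, 3, (8,)), num_classes=3).float()
    probs = model(X)                  # softmax(X @ W + b), shape (8, 3)
    loss = model.get_loss(X, Yoh_)
    return probs, loss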
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, primals_2, primals_1, alpha=1, beta
=1, out=buf0)
del primals_1
del primals_3
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(16)](buf0, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(16)](buf1, buf2, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf1
return buf2, buf2, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0)
class PTLogregNew(nn.Module):
def __init__(self, D, C):
"""Arguments:
- D: dimensions of each datapoint
- C: number of classes
"""
super(PTLogregNew, self).__init__()
self.W = torch.nn.Parameter(torch.zeros(D, C))
self.b = torch.nn.Parameter(torch.zeros(C))
def get_loss(self, X, Yoh_):
Y = self.forward(X)
return -torch.mean(torch.sum(Yoh_ * torch.log(Y) + (1 - Yoh_) *
torch.log(1 - Y)))
def forward(self, input_0):
primals_1 = self.W
primals_3 = self.b
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
EduardEdiJerkovic/deeplearning
|
PTLogreg
| false
| 8,995
|
[
"MIT"
] | 0
|
0493b26ca153f93f41e8de930e16df658fb01a56
|
https://github.com/EduardEdiJerkovic/deeplearning/tree/0493b26ca153f93f41e8de930e16df658fb01a56
|
Encoding
|
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch._C
import torch.serialization
class Encoding(nn.Module):
"""Encoding Layer: a learnable residual encoder.
Input is of shape (batch_size, channels, height, width).
Output is of shape (batch_size, num_codes, channels).
Args:
channels: dimension of the features or feature channels
num_codes: number of code words
"""
def __init__(self, channels, num_codes):
super(Encoding, self).__init__()
self.channels, self.num_codes = channels, num_codes
std = 1.0 / (num_codes * channels) ** 0.5
self.codewords = nn.Parameter(torch.empty(num_codes, channels,
dtype=torch.float).uniform_(-std, std), requires_grad=True)
self.scale = nn.Parameter(torch.empty(num_codes, dtype=torch.float)
.uniform_(-1, 0), requires_grad=True)
@staticmethod
def scaled_l2(x, codewords, scale):
num_codes, channels = codewords.size()
batch_size = x.size(0)
reshaped_scale = scale.view((1, 1, num_codes))
expanded_x = x.unsqueeze(2).expand((batch_size, x.size(1),
num_codes, channels))
reshaped_codewords = codewords.view((1, 1, num_codes, channels))
scaled_l2_norm = reshaped_scale * (expanded_x - reshaped_codewords
).pow(2).sum(dim=3)
return scaled_l2_norm
@staticmethod
def aggregate(assignment_weights, x, codewords):
num_codes, channels = codewords.size()
reshaped_codewords = codewords.view((1, 1, num_codes, channels))
batch_size = x.size(0)
expanded_x = x.unsqueeze(2).expand((batch_size, x.size(1),
num_codes, channels))
encoded_feat = (assignment_weights.unsqueeze(3) * (expanded_x -
reshaped_codewords)).sum(dim=1)
return encoded_feat
def forward(self, x):
assert x.dim() == 4 and x.size(1) == self.channels
batch_size = x.size(0)
x = x.view(batch_size, self.channels, -1).transpose(1, 2).contiguous()
assignment_weights = F.softmax(self.scaled_l2(x, self.codewords,
self.scale), dim=2)
encoded_feat = self.aggregate(assignment_weights, x, self.codewords)
return encoded_feat
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (
f'(Nx{self.channels}xHxW =>Nx{self.num_codes}x{self.channels})')
return repr_str
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4, 'num_codes': 4}]
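# Illustrative usage sketch (not part of the original module): a shape check
# for the Encoding layer. The helper name and toy sizes are assumptions made
# for this example.
def _encoding_example():
    layer = Encoding(channels=4, num_codes=4)
    feats = torch.rand(2, 4, 8, 8)    # (batch, channels, H, W)
    encoded = layer(feats)            # (batch, num_codes, channels)
    assert encoded.shape == (2, 4, 4)
    return encoded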
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch._C
import torch.serialization
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_pow_sub_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 16
x2 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (16 + x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (32 + x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr1 + (48 + x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tmp1 - tmp2
tmp4 = tmp3 * tmp3
tmp7 = tmp5 - tmp6
tmp8 = tmp7 * tmp7
tmp9 = tmp4 + tmp8
tmp12 = tmp10 - tmp11
tmp13 = tmp12 * tmp12
tmp14 = tmp9 + tmp13
tmp17 = tmp15 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp14 + tmp18
tmp20 = tmp0 * tmp19
tl.store(out_ptr0 + x4, tmp20, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_per_fused_mul_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r3 = rindex
x1 = xindex // 4 % 4
x2 = xindex // 16
x0 = xindex % 4
x4 = xindex % 16
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + 4 * r3 + 64 * x2), xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.load(in_ptr1 + (r3 + 16 * x0 + 64 * x2), xmask,
eviction_policy='evict_last', other=0.0)
tmp2 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp4 = tmp0 * tmp3
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp5, 0)
tmp8 = tl.sum(tmp7, 1)[:, None]
tl.store(out_ptr0 + x5, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_pow_sub_sum_0[grid(256)](primals_3, primals_1,
primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf0, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_2[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf1
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_per_fused_mul_sub_sum_3[grid(64)](buf2, primals_1, primals_2,
buf3, 64, 16, XBLOCK=1, num_warps=2, num_stages=1)
del buf2
return buf3, primals_1, primals_2, primals_3
class EncodingNew(nn.Module):
"""Encoding Layer: a learnable residual encoder.
Input is of shape (batch_size, channels, height, width).
Output is of shape (batch_size, num_codes, channels).
Args:
channels: dimension of the features or feature channels
num_codes: number of code words
"""
def __init__(self, channels, num_codes):
super(EncodingNew, self).__init__()
self.channels, self.num_codes = channels, num_codes
std = 1.0 / (num_codes * channels) ** 0.5
self.codewords = nn.Parameter(torch.empty(num_codes, channels,
dtype=torch.float).uniform_(-std, std), requires_grad=True)
self.scale = nn.Parameter(torch.empty(num_codes, dtype=torch.float)
.uniform_(-1, 0), requires_grad=True)
@staticmethod
def scaled_l2(x, codewords, scale):
num_codes, channels = codewords.size()
batch_size = x.size(0)
reshaped_scale = scale.view((1, 1, num_codes))
expanded_x = x.unsqueeze(2).expand((batch_size, x.size(1),
num_codes, channels))
reshaped_codewords = codewords.view((1, 1, num_codes, channels))
scaled_l2_norm = reshaped_scale * (expanded_x - reshaped_codewords
).pow(2).sum(dim=3)
return scaled_l2_norm
@staticmethod
def aggregate(assignment_weights, x, codewords):
num_codes, channels = codewords.size()
reshaped_codewords = codewords.view((1, 1, num_codes, channels))
batch_size = x.size(0)
expanded_x = x.unsqueeze(2).expand((batch_size, x.size(1),
num_codes, channels))
encoded_feat = (assignment_weights.unsqueeze(3) * (expanded_x -
reshaped_codewords)).sum(dim=1)
return encoded_feat
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (
f'(Nx{self.channels}xHxW =>Nx{self.num_codes}x{self.channels})')
return repr_str
def forward(self, input_0):
primals_2 = self.codewords
primals_3 = self.scale
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
AnonSubmission6150/submission6150
|
Encoding
| false
| 8,996
|
[
"Apache-2.0"
] | 0
|
571633d9a12b4fd7a9546947787fc068966dab04
|
https://github.com/AnonSubmission6150/submission6150/tree/571633d9a12b4fd7a9546947787fc068966dab04
|
SquareActivation
|
import torch
import torch.nn as nn
class SquareActivation(nn.Module):
"""
Square activation function, clamps the output between 0 and 20 to avoid overflow
"""
@staticmethod
def forward(x):
return torch.clamp(x ** 2, 0, 20)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
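# Illustrative sketch (not part of the original module): values whose square
# exceeds 20 are clipped, e.g. (-5.0) ** 2 = 25 becomes 20. The helper name is
# an assumption made for this example.
def _square_activation_example():
    x = torch.tensor([-5.0, 0.5, 5.0])
    return SquareActivation.forward(x)   # tensor([20.0000, 0.2500, 20.0000])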
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 * tmp0
tmp2 = 0.0
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = 20.0
tmp5 = triton_helpers.minimum(tmp3, tmp4)
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_pow_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class SquareActivationNew(nn.Module):
"""
Square activation function, clamps the output between 0 and 20 to avoid overflow
"""
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Ergodice/PWLU
|
SquareActivation
| false
| 8,997
|
[
"MIT"
] | 0
|
8e714cff4245b9282fe6b9420ffbab8178ba456c
|
https://github.com/Ergodice/PWLU/tree/8e714cff4245b9282fe6b9420ffbab8178ba456c
|
PPMConcat
|
import torch
import torch.nn as nn
import torch._C
import torch.serialization
class PPMConcat(nn.ModuleList):
"""Pyramid Pooling Module that only concat the features of each layer.
Args:
pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
Module.
"""
def __init__(self, pool_scales=(1, 3, 6, 8)):
super(PPMConcat, self).__init__([nn.AdaptiveAvgPool2d(pool_scale) for
pool_scale in pool_scales])
def forward(self, feats):
"""Forward function."""
ppm_outs = []
for ppm in self:
ppm_out = ppm(feats)
ppm_outs.append(ppm_out.view(*feats.shape[:2], -1))
concat_outs = torch.cat(ppm_outs, dim=2)
return concat_outs
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
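# Illustrative usage sketch (not part of the original module): with the
# default pool scales (1, 3, 6, 8) the flattened pooled maps have
# 1 + 9 + 36 + 64 = 110 elements, which is the hard-coded stride that appears
# in the fused kernels of the compiled version. The helper name is an
# assumption made for this example.
def _ppm_concat_example():
    ppm = PPMConcat()
    feats = torch.rand(4, 4, 4, 4)
    out = ppm(feats)                  # (batch, channels, 1 + 9 + 36 + 64)
    assert out.shape == (4, 4, 110)
    return out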
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch._C
import torch.serialization
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_cat_mean_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.store(out_ptr1 + 110 * x0, tmp6, xmask)
@triton.jit
def triton_poi_fused__adaptive_avg_pool2d_cat_1(in_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 3 % 3
x0 = xindex % 3
x2 = xindex // 9
x3 = xindex % 9
tmp0 = 4 * x1 // 3
tmp1 = 2 + 4 * x1 // 3
tmp2 = tmp0 < tmp1
tmp3 = 4 * x0 // 3
tmp4 = 2 + 4 * x0 // 3
tmp5 = tmp3 < tmp4
tmp6 = tmp2 & tmp5
tmp7 = tl.load(in_ptr0 + (4 * (4 * x1 // 3) + 16 * x2 + 4 * x0 // 3),
tmp6 & xmask, other=0.0)
tmp8 = 1 + 4 * x0 // 3
tmp9 = tmp8 < tmp4
tmp10 = tmp2 & tmp9
tmp11 = tl.load(in_ptr0 + (1 + 4 * (4 * x1 // 3) + 16 * x2 + 4 * x0 //
3), tmp10 & xmask, other=0.0)
tmp12 = tmp11 + tmp7
tmp13 = 1 + 4 * x1 // 3
tmp14 = tmp13 < tmp1
tmp15 = tmp14 & tmp5
tmp16 = tl.load(in_ptr0 + (4 + 4 * (4 * x1 // 3) + 16 * x2 + 4 * x0 //
3), tmp15 & xmask, other=0.0)
tmp17 = tmp16 + tmp12
tmp18 = tmp14 & tmp9
tmp19 = tl.load(in_ptr0 + (5 + 4 * (4 * x1 // 3) + 16 * x2 + 4 * x0 //
3), tmp18 & xmask, other=0.0)
tmp20 = tmp19 + tmp17
tmp21 = 1.0
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp6, tmp21, tmp22)
tmp24 = tl.where(tmp10, tmp21, tmp22)
tmp25 = tmp24 + tmp23
tmp26 = tl.where(tmp15, tmp21, tmp22)
tmp27 = tmp26 + tmp25
tmp28 = tl.where(tmp18, tmp21, tmp22)
tmp29 = tmp28 + tmp27
tmp30 = tmp20 / tmp29
tl.store(out_ptr1 + (x3 + 110 * x2), tmp30, xmask)
@triton.jit
def triton_poi_fused__adaptive_avg_pool2d_cat_2(in_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 6 % 6
x0 = xindex % 6
x2 = xindex // 36
x3 = xindex % 36
tmp0 = 2 * x1 // 3
tmp1 = (9 + 4 * x1) // 6
tmp2 = tmp0 < tmp1
tmp3 = 2 * x0 // 3
tmp4 = (9 + 4 * x0) // 6
tmp5 = tmp3 < tmp4
tmp6 = tmp2 & tmp5
tmp7 = tl.load(in_ptr0 + (4 * (2 * x1 // 3) + 16 * x2 + 2 * x0 // 3),
tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = 1 + 2 * x0 // 3
tmp9 = tmp8 < tmp4
tmp10 = tmp2 & tmp9
tmp11 = tl.load(in_ptr0 + (1 + 4 * (2 * x1 // 3) + 16 * x2 + 2 * x0 //
3), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp11 + tmp7
tmp13 = 1 + 2 * x1 // 3
tmp14 = tmp13 < tmp1
tmp15 = tmp14 & tmp5
tmp16 = tl.load(in_ptr0 + (4 + 4 * (2 * x1 // 3) + 16 * x2 + 2 * x0 //
3), tmp15 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tmp16 + tmp12
tmp18 = tmp14 & tmp9
tmp19 = tl.load(in_ptr0 + (5 + 4 * (2 * x1 // 3) + 16 * x2 + 2 * x0 //
3), tmp18 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tmp19 + tmp17
tmp21 = 1.0
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp6, tmp21, tmp22)
tmp24 = tl.where(tmp10, tmp21, tmp22)
tmp25 = tmp24 + tmp23
tmp26 = tl.where(tmp15, tmp21, tmp22)
tmp27 = tmp26 + tmp25
tmp28 = tl.where(tmp18, tmp21, tmp22)
tmp29 = tmp28 + tmp27
tmp30 = tmp20 / tmp29
tl.store(out_ptr1 + (x3 + 110 * x2), tmp30, xmask)
@triton.jit
def triton_poi_fused__adaptive_avg_pool2d_cat_3(in_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 8 % 8
x0 = xindex % 8
x2 = xindex // 64
x3 = xindex % 64
tmp0 = x1 // 2
tmp1 = (11 + 4 * x1) // 8
tmp2 = tmp0 < tmp1
tmp3 = x0 // 2
tmp4 = (11 + 4 * x0) // 8
tmp5 = tmp3 < tmp4
tmp6 = tmp2 & tmp5
tmp7 = tl.load(in_ptr0 + (4 * (x1 // 2) + 16 * x2 + x0 // 2), tmp6 &
xmask, eviction_policy='evict_last', other=0.0)
tmp8 = 1 + x0 // 2
tmp9 = tmp8 < tmp4
tmp10 = tmp2 & tmp9
tmp11 = tl.load(in_ptr0 + (1 + 4 * (x1 // 2) + 16 * x2 + x0 // 2),
tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp11 + tmp7
tmp13 = 1 + x1 // 2
tmp14 = tmp13 < tmp1
tmp15 = tmp14 & tmp5
tmp16 = tl.load(in_ptr0 + (4 + 4 * (x1 // 2) + 16 * x2 + x0 // 2),
tmp15 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tmp16 + tmp12
tmp18 = tmp14 & tmp9
tmp19 = tl.load(in_ptr0 + (5 + 4 * (x1 // 2) + 16 * x2 + x0 // 2),
tmp18 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tmp19 + tmp17
tmp21 = 1.0
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp6, tmp21, tmp22)
tmp24 = tl.where(tmp10, tmp21, tmp22)
tmp25 = tmp24 + tmp23
tmp26 = tl.where(tmp15, tmp21, tmp22)
tmp27 = tmp26 + tmp25
tmp28 = tl.where(tmp18, tmp21, tmp22)
tmp29 = tmp28 + tmp27
tmp30 = tmp20 / tmp29
tl.store(out_ptr1 + (x3 + 110 * x2), tmp30, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf8 = empty_strided_cuda((4, 4, 110), (440, 110, 1), torch.float32)
buf4 = reinterpret_tensor(buf8, (4, 4, 1), (440, 110, 1), 0)
get_raw_stream(0)
triton_per_fused_cat_mean_0[grid(16)](arg0_1, buf4, 16, 16, XBLOCK=
1, num_warps=2, num_stages=1)
buf5 = reinterpret_tensor(buf8, (4, 4, 9), (440, 110, 1), 1)
triton_poi_fused__adaptive_avg_pool2d_cat_1[grid(144)](arg0_1, buf5,
144, XBLOCK=128, num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf8, (4, 4, 36), (440, 110, 1), 10)
triton_poi_fused__adaptive_avg_pool2d_cat_2[grid(576)](arg0_1, buf6,
576, XBLOCK=256, num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf8, (4, 4, 64), (440, 110, 1), 46)
triton_poi_fused__adaptive_avg_pool2d_cat_3[grid(1024)](arg0_1,
buf7, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf8,
class PPMConcatNew(nn.ModuleList):
"""Pyramid Pooling Module that only concat the features of each layer.
Args:
pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
Module.
"""
def __init__(self, pool_scales=(1, 3, 6, 8)):
super(PPMConcatNew, self).__init__([nn.AdaptiveAvgPool2d(pool_scale
) for pool_scale in pool_scales])
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
AnonSubmission6150/submission6150
|
PPMConcat
| false
| 8,998
|
[
"Apache-2.0"
] | 0
|
571633d9a12b4fd7a9546947787fc068966dab04
|
https://github.com/AnonSubmission6150/submission6150/tree/571633d9a12b4fd7a9546947787fc068966dab04
|
EDMLoss
|
import torch
import torch.nn as nn
from torch.autograd import Variable
class EDMLoss(nn.Module):
def __init__(self):
super(EDMLoss, self).__init__()
def forward(self, p_target: 'Variable', p_estimate: 'Variable'):
assert p_target.shape == p_estimate.shape
cdf_target = torch.cumsum(p_target, dim=1)
cdf_estimate = torch.cumsum(p_estimate, dim=1)
cdf_diff = cdf_estimate - cdf_target
samplewise_emd = torch.sqrt(torch.mean(torch.pow(torch.abs(cdf_diff
), 2)))
return samplewise_emd.mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
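# Illustrative usage sketch (not part of the original module): the loss is the
# root-mean-square difference between the CDFs of two score distributions
# taken along dim=1, so the inputs are expected to be probability vectors.
# The helper name and toy sizes are assumptions made for this example.
def _edm_loss_example():
    criterion = EDMLoss()
    p_target = torch.softmax(torch.randn(8, 10), dim=1)
    p_estimate = torch.softmax(torch.randn(8, 10), dim=1)
    return criterion(p_target, p_estimate)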
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
@triton.jit
def triton_per_fused_cumsum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl
.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 16
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 64 * x1), xmask, other=0.0)
tmp1 = tmp0.to(tl.float32)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp3, = tl.associative_scan((tmp2,), 1, _triton_helper_fn_add0)
tl.store(out_ptr0 + (x0 + 16 * r2 + 64 * x1), tmp3, xmask)
@triton.jit
def triton_per_fused_abs_mean_pow_sqrt_sub_1(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tmp3 * tmp3
tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = 256.0
tmp9 = tmp7 / tmp8
tmp10 = libdevice.sqrt(tmp9)
tmp11 = 1.0
tmp12 = tmp10 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_cumsum_0[grid(64)](arg1_1, buf0, 64, 4, XBLOCK=8,
num_warps=2, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_per_fused_cumsum_0[grid(64)](arg0_1, buf1, 64, 4, XBLOCK=8,
num_warps=2, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused_abs_mean_pow_sqrt_sub_1[grid(1)](buf3, buf0, buf1,
1, 256, num_warps=2, num_stages=1)
del buf0
del buf1
return buf3,
class EDMLossNew(nn.Module):
def __init__(self):
super(EDMLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
DazhiZhong/NIMA
|
EDMLoss
| false
| 8,999
|
[
"MIT"
] | 0
|
82655ac762414ef2a980feba8b6978c605c66a4d
|
https://github.com/DazhiZhong/NIMA/tree/82655ac762414ef2a980feba8b6978c605c66a4d
|
ContrastiveEmbeddingLoss
|
import torch
from torch import nn
from torch.nn.modules.loss import *
from torch.nn.modules import *
from torch.optim import *
from torch.optim.lr_scheduler import *
import torch.distributed
import torch.multiprocessing
import torch.backends
class ContrastiveEmbeddingLoss(nn.Module):
"""The Contrastive embedding loss.
It has been proposed in `Dimensionality Reduction
by Learning an Invariant Mapping`_.
.. _Dimensionality Reduction by Learning an Invariant Mapping:
http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
"""
def __init__(self, margin=1.0, reduction='mean'):
"""
Args:
margin: margin parameter
reduction: criterion reduction type
"""
super().__init__()
self.margin = margin
self.reduction = reduction or 'none'
def forward(self, embeddings_left: 'torch.Tensor', embeddings_right:
'torch.Tensor', distance_true) ->torch.Tensor:
"""Forward propagation method for the contrastive loss.
Args:
embeddings_left: left objects embeddings
embeddings_right: right objects embeddings
distance_true: true distances
Returns:
torch.Tensor: loss
"""
diff = embeddings_left - embeddings_right
distance_pred = torch.sqrt(torch.sum(torch.pow(diff, 2), 1))
bs = len(distance_true)
margin_distance = self.margin - distance_pred
margin_distance = torch.clamp(margin_distance, min=0.0)
loss = (1 - distance_true) * torch.pow(distance_pred, 2
) + distance_true * torch.pow(margin_distance, 2)
if self.reduction == 'mean':
loss = torch.sum(loss) / 2.0 / bs
elif self.reduction == 'sum':
loss = torch.sum(loss)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
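# Illustrative worked example (not part of the original module): for a
# matching pair (distance_true = 0) the per-pair term is d**2, for a
# non-matching pair (distance_true = 1) it is clamp(margin - d, 0)**2.
# The helper name and the toy embeddings are assumptions made for this example.
def _contrastive_embedding_example():
    criterion = ContrastiveEmbeddingLoss(margin=1.0, reduction='mean')
    left = torch.tensor([[0.0, 0.0], [1.0, 0.0]])
    right = torch.tensor([[0.0, 0.0], [0.0, 0.0]])
    distance_true = torch.tensor([0.0, 1.0])
    # predicted distances are 0 and 1, so both terms vanish and the loss is 0
    return criterion(left, right, distance_true)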
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
from torch.nn.modules.loss import *
from torch.nn.modules import *
from torch.optim import *
from torch.optim.lr_scheduler import *
import torch.distributed
import torch.multiprocessing
import torch.backends
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_pow_sqrt_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp4 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp9 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp10 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp14 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp15 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = libdevice.sqrt(tmp18)
tl.store(out_ptr0 + x2, tmp19, xmask)
@triton.jit
def triton_per_fused_add_clamp_div_mul_pow_rsub_sum_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
r0 = rindex % 64
tmp0 = tl.load(in_ptr0 + r2, None)
tmp3 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp3 * tmp3
tmp5 = tmp2 * tmp4
tmp6 = tmp1 - tmp3
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8 * tmp8
tmp10 = tmp0 * tmp9
tmp11 = tmp5 + tmp10
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = 0.25
tmp18 = tmp16 * tmp17
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_pow_sqrt_sub_sum_0[grid(64)](arg0_1, arg1_1, buf0,
64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused_add_clamp_div_mul_pow_rsub_sum_1[grid(1)](buf2,
arg2_1, buf0, 1, 256, num_warps=2, num_stages=1)
del arg2_1
del buf0
return buf2,
class ContrastiveEmbeddingLossNew(nn.Module):
"""The Contrastive embedding loss.
It has been proposed in `Dimensionality Reduction
by Learning an Invariant Mapping`_.
.. _Dimensionality Reduction by Learning an Invariant Mapping:
http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
"""
def __init__(self, margin=1.0, reduction='mean'):
"""
Args:
margin: margin parameter
reduction: criterion reduction type
"""
super().__init__()
self.margin = margin
self.reduction = reduction or 'none'
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
Casyfill/catalyst
|
ContrastiveEmbeddingLoss
| false
| 9,000
|
[
"Apache-2.0"
] | 0
|
7f63545dbc53902c3dd959463def28a67a16a989
|
https://github.com/Casyfill/catalyst/tree/7f63545dbc53902c3dd959463def28a67a16a989
|
ycbcr_to_rgb_jpeg
|
import torch
import numpy as np
import torch.nn as nn
class ycbcr_to_rgb_jpeg(nn.Module):
""" Converts YCbCr image to RGB JPEG
Input:
image(tensor): batch x height x width x 3
    Output:
result(tensor): batch x 3 x height x width
"""
def __init__(self):
super(ycbcr_to_rgb_jpeg, self).__init__()
matrix = np.array([[1.0, 0.0, 1.402], [1, -0.344136, -0.714136], [1,
1.772, 0]], dtype=np.float32).T
self.shift = nn.Parameter(torch.tensor([0, -128.0, -128.0]))
self.matrix = nn.Parameter(torch.from_numpy(matrix))
def forward(self, image):
result = torch.tensordot(image + self.shift, self.matrix, dims=1)
result.view(image.shape)
return result.permute(0, 3, 1, 2)
def get_inputs():
return [torch.rand([4, 4, 4, 3])]
def get_init_inputs():
return [[], {}]
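# Illustrative usage sketch (not part of the original module): the module
# expects channels-last YCbCr input with Cb/Cr in the JPEG 0..255 convention
# (hence the -128 shift) and returns a channels-first RGB tensor. The helper
# name and toy sizes are assumptions made for this example.
def _ycbcr_to_rgb_example():
    converter = ycbcr_to_rgb_jpeg()
    ycbcr = torch.rand(2, 8, 8, 3) * 255.0   # (N, H, W, 3)
    rgb = converter(ycbcr)                   # (N, 3, H, W)
    assert rgb.shape == (2, 3, 8, 8)
    return rgb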
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 3
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (3,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 3), (48, 12, 3, 1))
assert_size_stride(primals_3, (3, 3), (1, 3))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 3), (48, 12, 3, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(192)](primals_2, primals_1, buf0, 192,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 3), (3, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 3), (3, 1), 0),
primals_3, out=buf1)
return reinterpret_tensor(buf1, (4, 3, 4, 4), (48, 1, 12, 3), 0
), reinterpret_tensor(buf0, (64, 3), (3, 1), 0), reinterpret_tensor(
primals_3, (3, 3), (3, 1), 0)
class ycbcr_to_rgb_jpegNew(nn.Module):
""" Converts YCbCr image to RGB JPEG
Input:
image(tensor): batch x height x width x 3
    Output:
result(tensor): batch x 3 x height x width
"""
def __init__(self):
super(ycbcr_to_rgb_jpegNew, self).__init__()
matrix = np.array([[1.0, 0.0, 1.402], [1, -0.344136, -0.714136], [1,
1.772, 0]], dtype=np.float32).T
self.shift = nn.Parameter(torch.tensor([0, -128.0, -128.0]))
self.matrix = nn.Parameter(torch.from_numpy(matrix))
def forward(self, input_0):
primals_1 = self.shift
primals_3 = self.matrix
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
DazhiZhong/DiffJPEG
|
ycbcr_to_rgb_jpeg
| false
| 9,001
|
[
"MIT"
] | 0
|
e20de92539f31a57906ae4c32a41dc46e774c316
|
https://github.com/DazhiZhong/DiffJPEG/tree/e20de92539f31a57906ae4c32a41dc46e774c316
|
ContrastivePairwiseEmbeddingLoss
|
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn.modules.loss import *
from torch.nn.modules import *
from torch.optim import *
from torch.optim.lr_scheduler import *
import torch.distributed
import torch.multiprocessing
import torch.backends
class ContrastivePairwiseEmbeddingLoss(nn.Module):
"""ContrastivePairwiseEmbeddingLoss – proof of concept criterion.
Still work in progress.
@TODO: Docs. Contribution is welcome.
"""
def __init__(self, margin=1.0, reduction='mean'):
"""
Args:
margin: margin parameter
reduction: criterion reduction type
"""
super().__init__()
self.margin = margin
self.reduction = reduction or 'none'
def forward(self, embeddings_pred, embeddings_true) ->torch.Tensor:
"""Forward propagation method for the contrastive loss.
Work in progress.
Args:
embeddings_pred: predicted embeddings
embeddings_true: true embeddings
Returns:
torch.Tensor: loss
"""
device = embeddings_pred.device
pairwise_similarity = torch.einsum('se,ae->sa', embeddings_pred,
embeddings_true)
bs = embeddings_pred.shape[0]
batch_idx = torch.arange(bs, device=device)
loss = F.cross_entropy(pairwise_similarity, batch_idx, reduction=
self.reduction)
return loss
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
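# Illustrative usage sketch (not part of the original module): every row of
# `embeddings_pred` is scored against every row of `embeddings_true` via the
# einsum above, and the diagonal (sample i matched with sample i) is treated
# as the correct class for cross entropy. The helper name and toy sizes are
# assumptions made for this example.
def _contrastive_pairwise_example():
    criterion = ContrastivePairwiseEmbeddingLoss(reduction='mean')
    embeddings_pred = torch.randn(4, 8)
    embeddings_true = torch.randn(4, 8)
    return criterion(embeddings_pred, embeddings_true)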
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
from torch.nn.modules.loss import *
from torch.nn.modules import *
from torch.optim import *
from torch.optim.lr_scheduler import *
import torch.distributed
import torch.multiprocessing
import torch.backends
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_per_fused_arange_nll_loss_forward_1(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp6 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp0 = r0
tmp1 = tl.full([1, 1], -100, tl.int64)
tmp2 = tmp0 != tmp1
tmp3 = tl.full([1, 1], 0, tl.int64)
tmp4 = tl.where(tmp2, tmp0, tmp3)
tmp5 = tl.load(in_ptr0 + (tmp4 + 4 * r0), None, eviction_policy=
'evict_last')
tmp7 = tl_math.exp(tmp6)
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp12 = tl_math.exp(tmp11)
tmp13 = tmp10 + tmp12
tmp15 = tl_math.exp(tmp14)
tmp16 = tmp13 + tmp15
tmp17 = tl_math.log(tmp16)
tmp18 = tmp5 - tmp17
tmp19 = -tmp18
tmp20 = 0.0
tmp21 = tl.where(tmp2, tmp19, tmp20)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK])
tmp24 = tl.sum(tmp22, 1)[:, None]
tmp25 = tmp2.to(tl.int64)
tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK])
tmp28 = tl.sum(tmp26, 1)[:, None]
tmp29 = tmp28.to(tl.float32)
tmp30 = tmp24 / tmp29
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp30, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg0_1, (1, 4, 4), (16, 4, 1),
0), reinterpret_tensor(arg1_1, (1, 4, 4), (0, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(16)](buf0, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf0
buf2 = empty_strided_cuda((), (), torch.float32)
buf4 = buf2
del buf2
triton_per_fused_arange_nll_loss_forward_1[grid(1)](buf4, buf1, 1,
4, XBLOCK=1, num_warps=2, num_stages=1)
del buf1
return buf4,
class ContrastivePairwiseEmbeddingLossNew(nn.Module):
"""ContrastivePairwiseEmbeddingLoss – proof of concept criterion.
Still work in progress.
@TODO: Docs. Contribution is welcome.
"""
def __init__(self, margin=1.0, reduction='mean'):
"""
Args:
margin: margin parameter
reduction: criterion reduction type
"""
super().__init__()
self.margin = margin
self.reduction = reduction or 'none'
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Casyfill/catalyst
|
ContrastivePairwiseEmbeddingLoss
| false
| 9,002
|
[
"Apache-2.0"
] | 0
|
7f63545dbc53902c3dd959463def28a67a16a989
|
https://github.com/Casyfill/catalyst/tree/7f63545dbc53902c3dd959463def28a67a16a989
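A minimal eager sketch of the loss that the two kernels above fuse: pairwise similarities via einsum, followed by cross-entropy against the batch indices (each row's positive is its own index). The seed and tensor values are illustrative, not from the repo.

import torch
import torch.nn.functional as F

torch.manual_seed(0)
pred = torch.rand(4, 4)                       # predicted embeddings, B x E
true = torch.rand(4, 4)                       # true embeddings,      B x E
sim = torch.einsum('se,ae->sa', pred, true)   # B x B similarity matrix
loss = F.cross_entropy(sim, torch.arange(4))  # targets are the batch indices 0..B-1
print(loss.item())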
|
BWCEWLoss
|
import torch
from torch import Tensor
from typing import Optional
from torch import nn
class BWCEWLoss(nn.Module):
""" Binary weighted cross entropy loss. """
def __init__(self, positive_class_weight: 'Optional[Tensor]'=None,
robust_lambda: 'int'=0, confidence_penalty: 'int'=0, **kwargs):
super().__init__()
self.loss_fn = nn.BCEWithLogitsLoss(pos_weight=
positive_class_weight, **kwargs)
self.robust_lambda = robust_lambda
self.confidence_penalty = confidence_penalty
def forward(self, preds: 'torch.Tensor', target: 'torch.Tensor'):
train_loss = self.loss_fn(preds, target.float())
if self.robust_lambda > 0:
train_loss = (1 - self.robust_lambda
) * train_loss + self.robust_lambda / 2
train_mean_loss = torch.mean(train_loss)
if self.confidence_penalty > 0:
probabilities = torch.sigmoid(preds)
            # NOTE: `utils` (confidence-penalty helper) comes from the original repo and is not imported in this snippet
            mean_penalty = utils.mean_confidence_penalty(probabilities, 2)
train_mean_loss += self.confidence_penalty * mean_penalty
return train_mean_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import Tensor
from typing import Optional
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_mean_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tmp18 = tmp17 / tmp1
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_mean_0[grid(1)](buf1,
arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class BWCEWLossNew(nn.Module):
""" Binary weighted cross entropy loss. """
def __init__(self, positive_class_weight: 'Optional[Tensor]'=None,
robust_lambda: 'int'=0, confidence_penalty: 'int'=0, **kwargs):
super().__init__()
self.loss_fn = nn.BCEWithLogitsLoss(pos_weight=
positive_class_weight, **kwargs)
self.robust_lambda = robust_lambda
self.confidence_penalty = confidence_penalty
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Connormcc3/ludwig
|
BWCEWLoss
| false
| 9,003
|
[
"Apache-2.0"
] | 0
|
5d562cbc0c4fed3e607969e18611f34240eef177
|
https://github.com/Connormcc3/ludwig/tree/5d562cbc0c4fed3e607969e18611f34240eef177
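With the default robust_lambda=0 and confidence_penalty=0 the module above reduces to a plain mean BCE-with-logits, which is what the single fused kernel computes. A quick eager check with illustrative inputs:

import torch
import torch.nn as nn

torch.manual_seed(0)
preds = torch.rand(4, 4, 4, 4)
target = torch.rand(4, 4, 4, 4)
# Plain mean-reduced BCE with logits; the robust-lambda and confidence-penalty terms are
# disabled by default and are not part of the compiled path.
print(nn.BCEWithLogitsLoss()(preds, target.float()).item())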
|
DQNetwork
|
from torch.nn import Module
import torch
import torch.nn as nn
class DQNetwork(Module):
def __init__(self, num_states, num_actions):
super(DQNetwork, self).__init__()
self.relu = nn.ReLU()
self.fc_layer1 = nn.Linear(num_states, 256)
self.fc_layer2 = nn.Linear(256, 256)
self.q_val = nn.Linear(256, num_actions)
def forward(self, x):
x = self.relu(self.fc_layer1(x))
x = self.relu(self.fc_layer2(x))
out = self.q_val(x)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_states': 4, 'num_actions': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn import Module
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (256, 256), (256, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (4, 256), (256, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1,
primals_2, buf6, 16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_4, (256, 256), (1, 256), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf3,
primals_5, buf5, 16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 256),
(256, 1), 0), reinterpret_tensor(primals_6, (256, 4), (1, 256),
0), alpha=1, beta=1, out=buf4)
del primals_7
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 256), (256, 1), 0
), reinterpret_tensor(buf3, (64, 256), (256, 1), 0
), primals_6, buf5, primals_4, buf6
class DQNetworkNew(Module):
def __init__(self, num_states, num_actions):
super(DQNetworkNew, self).__init__()
self.relu = nn.ReLU()
self.fc_layer1 = nn.Linear(num_states, 256)
self.fc_layer2 = nn.Linear(256, 256)
self.q_val = nn.Linear(256, num_actions)
def forward(self, input_0):
primals_1 = self.fc_layer1.weight
primals_2 = self.fc_layer1.bias
primals_4 = self.fc_layer2.weight
primals_5 = self.fc_layer2.bias
primals_6 = self.q_val.weight
primals_7 = self.q_val.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
Devanshu-singh-VR/Reinforcement-Learning_Mixed
|
DQNetwork
| false
| 9,004
|
[
"MIT"
] | 0
|
6b8b23977864f918ab8958b729d0faabcca720e4
|
https://github.com/Devanshu-singh-VR/Reinforcement-Learning_Mixed/tree/6b8b23977864f918ab8958b729d0faabcca720e4
|
deepQ
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class deepQ(nn.Module):
def __init__(self, action_size, obs_size, hidden_size=256):
super().__init__()
self.input_layer = nn.Linear(obs_size, hidden_size)
self.output_layer = nn.Linear(hidden_size, action_size)
def forward(self, x):
x = F.relu(self.input_layer(x))
x = self.output_layer(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'action_size': 4, 'obs_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 256), (256, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1,
primals_2, buf3, 16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 256),
(256, 1), 0), reinterpret_tensor(primals_4, (256, 4), (1, 256),
0), alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 256), (256, 1), 0), primals_4, buf3
class deepQNew(nn.Module):
def __init__(self, action_size, obs_size, hidden_size=256):
super().__init__()
self.input_layer = nn.Linear(obs_size, hidden_size)
self.output_layer = nn.Linear(hidden_size, action_size)
def forward(self, input_0):
primals_1 = self.input_layer.weight
primals_2 = self.input_layer.bias
primals_4 = self.output_layer.weight
primals_5 = self.output_layer.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
ExilesAI/RLAgents
|
deepQ
| false
| 9,005
|
[
"MIT"
] | 0
|
b8159a933c4674c7a62bfe9555870336616a59f3
|
https://github.com/ExilesAI/RLAgents/tree/b8159a933c4674c7a62bfe9555870336616a59f3
|
ArcMarginProduct
|
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn.modules.loss import *
from torch.nn.modules import *
from torch.optim import *
from torch.optim.lr_scheduler import *
import torch.distributed
import torch.multiprocessing
import torch.backends
class ArcMarginProduct(nn.Module):
"""Implementation of Arc Margin Product.
Args:
in_features: size of each input sample.
out_features: size of each output sample.
Shape:
- Input: :math:`(batch, H_{in})` where
:math:`H_{in} = in\\_features`.
- Output: :math:`(batch, H_{out})` where
:math:`H_{out} = out\\_features`.
Example:
>>> layer = ArcMarginProduct(5, 10)
        >>> loss_fn = nn.CrossEntropyLoss()
>>> embedding = torch.randn(3, 5, requires_grad=True)
>>> target = torch.empty(3, dtype=torch.long).random_(10)
>>> output = layer(embedding)
>>> loss = loss_fn(output, target)
>>> loss.backward()
"""
def __init__(self, in_features: 'int', out_features: 'int'):
super(ArcMarginProduct, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = nn.Parameter(torch.Tensor(out_features, in_features))
nn.init.xavier_uniform_(self.weight)
def __repr__(self) ->str:
"""Object representation."""
rep = (
f'ArcMarginProduct(in_features={self.in_features},out_features={self.out_features})'
)
return rep
def forward(self, input: 'torch.Tensor') ->torch.Tensor:
"""
Args:
input: input features,
expected shapes ``BxF`` where ``B``
is batch dimension and ``F`` is an
input feature dimension.
Returns:
tensor (logits) with shapes ``BxC``
where ``C`` is a number of classes
(out_features).
"""
cosine = F.linear(F.normalize(input), F.normalize(self.weight))
return cosine
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
from torch.nn.modules.loss import *
from torch.nn.modules import *
from torch.optim import *
from torch.optim.lr_scheduler import *
import torch.distributed
import torch.multiprocessing
import torch.backends
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x3, tmp15, xmask)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](primals_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_div_1[grid(16)](primals_2, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2)
del buf1
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0)
class ArcMarginProductNew(nn.Module):
"""Implementation of Arc Margin Product.
Args:
in_features: size of each input sample.
out_features: size of each output sample.
Shape:
- Input: :math:`(batch, H_{in})` where
:math:`H_{in} = in\\_features`.
- Output: :math:`(batch, H_{out})` where
:math:`H_{out} = out\\_features`.
Example:
>>> layer = ArcMarginProduct(5, 10)
        >>> loss_fn = nn.CrossEntropyLoss()
>>> embedding = torch.randn(3, 5, requires_grad=True)
>>> target = torch.empty(3, dtype=torch.long).random_(10)
>>> output = layer(embedding)
>>> loss = loss_fn(output, target)
>>> loss.backward()
"""
def __init__(self, in_features: 'int', out_features: 'int'):
super(ArcMarginProductNew, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = nn.Parameter(torch.Tensor(out_features, in_features))
nn.init.xavier_uniform_(self.weight)
def __repr__(self) ->str:
"""Object representation."""
rep = (
f'ArcMarginProduct(in_features={self.in_features},out_features={self.out_features})'
)
return rep
def forward(self, input_0):
primals_2 = self.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
|
Casyfill/catalyst
|
ArcMarginProduct
| false
| 9,006
|
[
"Apache-2.0"
] | 0
|
7f63545dbc53902c3dd959463def28a67a16a989
|
https://github.com/Casyfill/catalyst/tree/7f63545dbc53902c3dd959463def28a67a16a989
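The two div kernels above L2-normalise the input and the weight before the matmul, so the returned logits are cosine similarities bounded by [-1, 1]. A small eager check with illustrative shapes:

import torch
import torch.nn.functional as F

torch.manual_seed(0)
x = torch.rand(3, 5)                                   # batch of embeddings
w = torch.rand(10, 5)                                  # class weight matrix
cosine = F.linear(F.normalize(x), F.normalize(w))      # (3, 10) cosine logits
print(cosine.shape, bool(cosine.abs().max() <= 1.0 + 1e-6))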
|
chroma_subsampling
|
import torch
import torch.nn as nn
class chroma_subsampling(nn.Module):
""" Chroma subsampling on CbCv channels
Input:
image(tensor): batch x height x width x 3
Output:
y(tensor): batch x height x width
cb(tensor): batch x height/2 x width/2
cr(tensor): batch x height/2 x width/2
"""
def __init__(self):
super(chroma_subsampling, self).__init__()
def forward(self, image):
image_2 = image.permute(0, 3, 1, 2).clone()
avg_pool = nn.AvgPool2d(kernel_size=2, stride=(2, 2),
count_include_pad=False)
cb = avg_pool(image_2[:, 1, :, :].unsqueeze(1))
cr = avg_pool(image_2[:, 2, :, :].unsqueeze(1))
cb = cb.permute(0, 2, 3, 1)
cr = cr.permute(0, 2, 3, 1)
return image[:, :, :, 0], cb.squeeze(3), cr.squeeze(3)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (1 + 8 * x0 + 32 * x1), xmask, eviction_policy
='evict_last')
tmp1 = tl.load(in_ptr0 + (5 + 8 * x0 + 32 * x1), xmask, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (17 + 8 * x0 + 32 * x1), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (21 + 8 * x0 + 32 * x1), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 + 8 * x0 + 32 * x1), xmask, eviction_policy
='evict_last')
tmp1 = tl.load(in_ptr0 + (6 + 8 * x0 + 32 * x1), xmask, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (18 + 8 * x0 + 32 * x1), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (22 + 8 * x0 + 32 * x1), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 2, 2), (4, 1, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 1, 2, 2), (4, 1, 2, 1), torch.float32)
triton_poi_fused_avg_pool2d_1[grid(16)](arg0_1, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
return reinterpret_tensor(arg0_1, (4, 4, 4), (64, 16, 4), 0
), reinterpret_tensor(buf0, (4, 2, 2), (4, 2, 1), 0
), reinterpret_tensor(buf1, (4, 2, 2), (4, 2, 1), 0)
class chroma_subsamplingNew(nn.Module):
""" Chroma subsampling on CbCv channels
Input:
image(tensor): batch x height x width x 3
Output:
y(tensor): batch x height x width
cb(tensor): batch x height/2 x width/2
cr(tensor): batch x height/2 x width/2
"""
def __init__(self):
super(chroma_subsamplingNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0], output[1], output[2]
|
DazhiZhong/DiffJPEG
|
chroma_subsampling
| false
| 9,007
|
[
"MIT"
] | 0
|
e20de92539f31a57906ae4c32a41dc46e774c316
|
https://github.com/DazhiZhong/DiffJPEG/tree/e20de92539f31a57906ae4c32a41dc46e774c316
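An eager sketch of the 4:2:0-style subsampling that the two pooling kernels above implement: the Y channel is kept at full resolution while Cb and Cr are 2x2 average-pooled. Sizes here are illustrative only.

import torch
import torch.nn as nn

image = torch.rand(4, 4, 4, 3)                     # batch x H x W x 3 (channels-last)
chw = image.permute(0, 3, 1, 2)
pool = nn.AvgPool2d(kernel_size=2, stride=2, count_include_pad=False)
y = image[:, :, :, 0]                              # (4, 4, 4), full resolution
cb = pool(chw[:, 1:2]).squeeze(1)                  # (4, 2, 2)
cr = pool(chw[:, 2:3]).squeeze(1)                  # (4, 2, 2)
print(y.shape, cb.shape, cr.shape)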
|
BPR
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BPR(nn.Module):
def __init__(self, user_size, item_size, dim, weight_decay):
super().__init__()
self.W = nn.Parameter(torch.empty(user_size, dim))
self.H = nn.Parameter(torch.empty(item_size, dim))
nn.init.xavier_normal_(self.W.data)
nn.init.xavier_normal_(self.H.data)
self.weight_decay = weight_decay
def forward(self, u, i, j):
"""Return loss value.
Args:
u(torch.LongTensor): tensor stored user indexes. [batch_size,]
i(torch.LongTensor): tensor stored item indexes which is prefered by user. [batch_size,]
j(torch.LongTensor): tensor stored item indexes which is not prefered by user. [batch_size,]
Returns:
torch.FloatTensor
"""
u = self.W[u, :]
i = self.H[i, :]
j = self.H[j, :]
x_ui = torch.mul(u, i).sum(dim=1)
x_uj = torch.mul(u, j).sum(dim=1)
x_uij = x_ui - x_uj
log_prob = F.logsigmoid(x_uij).sum()
regularization = self.weight_decay * (u.norm(dim=1).pow(2).sum() +
i.norm(dim=1).pow(2).sum() + j.norm(dim=1).pow(2).sum())
return -log_prob + regularization
def recommend(self, u):
"""Return recommended item list given users.
Args:
u(torch.LongTensor): tensor stored user indexes. [batch_size,]
Returns:
pred(torch.LongTensor): recommended item list sorted by preference. [batch_size, item_size]
"""
u = self.W[u, :]
x_ui = torch.mm(u, self.H.t())
pred = torch.argsort(x_ui, dim=1)
return pred
def get_inputs():
return [torch.ones([4], dtype=torch.int64), torch.ones([4], dtype=torch
.int64), torch.ones([4], dtype=torch.int64)]
def get_init_inputs():
return [[], {'user_size': 4, 'item_size': 4, 'dim': 4, 'weight_decay': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_index_linalg_vector_norm_log_sigmoid_forward_mul_neg_pow_sub_sum_0(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp7 = tl.load(in_ptr2 + r0, None)
tmp26 = tl.load(in_ptr4 + r0, None)
tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4),
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + 4 * tmp4, None, eviction_policy='evict_last')
tmp8 = tmp7 + tmp1
tmp9 = tmp7 < 0
tmp10 = tl.where(tmp9, tmp8, tmp7)
tl.device_assert((0 <= tmp10) & (tmp10 < 4),
'index out of bounds: 0 <= tmp10 < 4')
tmp12 = tl.load(in_ptr3 + 4 * tmp10, None, eviction_policy='evict_last')
tmp13 = tmp6 * tmp12
tmp14 = tl.load(in_ptr1 + (1 + 4 * tmp4), None, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr3 + (1 + 4 * tmp10), None, eviction_policy=
'evict_last')
tmp16 = tmp14 * tmp15
tmp17 = tmp13 + tmp16
tmp18 = tl.load(in_ptr1 + (2 + 4 * tmp4), None, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr3 + (2 + 4 * tmp10), None, eviction_policy=
'evict_last')
tmp20 = tmp18 * tmp19
tmp21 = tmp17 + tmp20
tmp22 = tl.load(in_ptr1 + (3 + 4 * tmp4), None, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr3 + (3 + 4 * tmp10), None, eviction_policy=
'evict_last')
tmp24 = tmp22 * tmp23
tmp25 = tmp21 + tmp24
tmp27 = tmp26 + tmp1
tmp28 = tmp26 < 0
tmp29 = tl.where(tmp28, tmp27, tmp26)
tl.device_assert((0 <= tmp29) & (tmp29 < 4),
'index out of bounds: 0 <= tmp29 < 4')
tmp31 = tl.load(in_ptr3 + 4 * tmp29, None, eviction_policy='evict_last')
tmp32 = tmp6 * tmp31
tmp33 = tl.load(in_ptr3 + (1 + 4 * tmp29), None, eviction_policy=
'evict_last')
tmp34 = tmp14 * tmp33
tmp35 = tmp32 + tmp34
tmp36 = tl.load(in_ptr3 + (2 + 4 * tmp29), None, eviction_policy=
'evict_last')
tmp37 = tmp18 * tmp36
tmp38 = tmp35 + tmp37
tmp39 = tl.load(in_ptr3 + (3 + 4 * tmp29), None, eviction_policy=
'evict_last')
tmp40 = tmp22 * tmp39
tmp41 = tmp38 + tmp40
tmp42 = tmp6 * tmp6
tmp43 = tmp14 * tmp14
tmp44 = tmp42 + tmp43
tmp45 = tmp18 * tmp18
tmp46 = tmp44 + tmp45
tmp47 = tmp22 * tmp22
tmp48 = tmp46 + tmp47
tmp49 = libdevice.sqrt(tmp48)
tmp50 = tmp49 * tmp49
tmp51 = tl.broadcast_to(tmp50, [XBLOCK, RBLOCK])
tmp53 = tl.sum(tmp51, 1)[:, None]
tmp54 = tmp12 * tmp12
tmp55 = tmp15 * tmp15
tmp56 = tmp54 + tmp55
tmp57 = tmp19 * tmp19
tmp58 = tmp56 + tmp57
tmp59 = tmp23 * tmp23
tmp60 = tmp58 + tmp59
tmp61 = libdevice.sqrt(tmp60)
tmp62 = tmp61 * tmp61
tmp63 = tl.broadcast_to(tmp62, [XBLOCK, RBLOCK])
tmp65 = tl.sum(tmp63, 1)[:, None]
tmp66 = tmp31 * tmp31
tmp67 = tmp33 * tmp33
tmp68 = tmp66 + tmp67
tmp69 = tmp36 * tmp36
tmp70 = tmp68 + tmp69
tmp71 = tmp39 * tmp39
tmp72 = tmp70 + tmp71
tmp73 = libdevice.sqrt(tmp72)
tmp74 = tmp73 * tmp73
tmp75 = tl.broadcast_to(tmp74, [XBLOCK, RBLOCK])
tmp77 = tl.sum(tmp75, 1)[:, None]
tmp78 = tmp25 - tmp41
tmp79 = 0.0
tmp80 = triton_helpers.minimum(tmp79, tmp78)
tmp81 = tl_math.abs(tmp78)
tmp82 = -tmp81
tmp83 = tl_math.exp(tmp82)
tmp84 = libdevice.log1p(tmp83)
tmp85 = tmp80 - tmp84
tmp86 = tl.broadcast_to(tmp85, [XBLOCK, RBLOCK])
tmp88 = tl.sum(tmp86, 1)[:, None]
tmp89 = -tmp88
tmp90 = tmp53 + tmp65
tmp91 = tmp90 + tmp77
tmp92 = 4.0
tmp93 = tmp91 * tmp92
tmp94 = tmp89 + tmp93
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp94, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((), (), torch.float32)
buf6 = buf2
del buf2
get_raw_stream(0)
triton_per_fused_add_index_linalg_vector_norm_log_sigmoid_forward_mul_neg_pow_sub_sum_0[
grid(1)](buf6, primals_2, primals_1, primals_4, primals_3,
primals_5, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
return buf6, primals_1, primals_2, primals_3, primals_4, primals_5
class BPRNew(nn.Module):
def __init__(self, user_size, item_size, dim, weight_decay):
super().__init__()
self.W = nn.Parameter(torch.empty(user_size, dim))
self.H = nn.Parameter(torch.empty(item_size, dim))
nn.init.xavier_normal_(self.W.data)
nn.init.xavier_normal_(self.H.data)
self.weight_decay = weight_decay
def recommend(self, u):
"""Return recommended item list given users.
Args:
u(torch.LongTensor): tensor stored user indexes. [batch_size,]
Returns:
pred(torch.LongTensor): recommended item list sorted by preference. [batch_size, item_size]
"""
u = self.W[u, :]
x_ui = torch.mm(u, self.H.t())
pred = torch.argsort(x_ui, dim=1)
return pred
def forward(self, input_0, input_1, input_2):
primals_1 = self.W
primals_3 = self.H
primals_2 = input_0
primals_4 = input_1
primals_5 = input_2
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
EternalImmortal/bpr
|
BPR
| false
| 9,008
|
[
"MIT"
] | 0
|
ba95806530e51b580359d22ed533ad461124fa22
|
https://github.com/EternalImmortal/bpr/tree/ba95806530e51b580359d22ed533ad461124fa22
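A minimal eager sketch of the BPR objective that the single fused kernel above evaluates: -sum(logsigmoid(x_ui - x_uj)) plus the weight-decay penalty on the gathered embedding rows, using the same toy sizes and weight_decay=4 as get_init_inputs. The seed and variable names are illustrative.

import torch
import torch.nn.functional as F

torch.manual_seed(0)
W = torch.randn(4, 4)                              # user embeddings
H = torch.randn(4, 4)                              # item embeddings
u = i = j = torch.ones(4, dtype=torch.long)        # toy index batches
wu, hi, hj = W[u], H[i], H[j]
x_uij = (wu * hi).sum(dim=1) - (wu * hj).sum(dim=1)
reg = 4 * (wu.norm(dim=1).pow(2).sum()
           + hi.norm(dim=1).pow(2).sum()
           + hj.norm(dim=1).pow(2).sum())          # weight_decay = 4
loss = -F.logsigmoid(x_uij).sum() + reg
print(loss.item())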
|
SigmoidCrossEntropyLoss
|
import torch
from torch import Tensor
from typing import List
from typing import Optional
from typing import Union
from torch import nn
class SigmoidCrossEntropyLoss(nn.Module):
def __init__(self, class_weights: 'Optional[Union[Tensor, List]]'=None,
**kwargs):
"""
Params:
class_weights: List or 1D tensor of length equal to number of classes.
"""
super().__init__()
if class_weights:
self.loss_fn = nn.BCEWithLogitsLoss(reduction='none',
pos_weight=torch.Tensor(class_weights))
else:
self.loss_fn = nn.BCEWithLogitsLoss(reduction='none')
def forward(self, preds: 'Tensor', target: 'Tensor') ->Tensor:
if preds.ndim != 2:
raise RuntimeError(
'SigmoidCrossEntropyLoss currently supported for 2D tensors.')
element_loss = self.loss_fn(preds.type(torch.float32), target.type(
torch.float32))
loss = torch.sum(element_loss, dim=1)
loss = torch.mean(loss)
return loss
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import Tensor
from typing import List
from typing import Optional
from typing import Union
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_mean_sum_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp14 = tmp1 - tmp13
tmp16 = tmp14 * tmp15
tmp17 = triton_helpers.minimum(tmp5, tmp15)
tmp18 = tl_math.abs(tmp15)
tmp19 = -tmp18
tmp20 = tl_math.exp(tmp19)
tmp21 = libdevice.log1p(tmp20)
tmp22 = tmp17 - tmp21
tmp23 = tmp16 - tmp22
tmp24 = tmp12 + tmp23
tmp26 = tmp1 - tmp25
tmp28 = tmp26 * tmp27
tmp29 = triton_helpers.minimum(tmp5, tmp27)
tmp30 = tl_math.abs(tmp27)
tmp31 = -tmp30
tmp32 = tl_math.exp(tmp31)
tmp33 = libdevice.log1p(tmp32)
tmp34 = tmp29 - tmp33
tmp35 = tmp28 - tmp34
tmp36 = tmp24 + tmp35
tmp38 = tmp1 - tmp37
tmp40 = tmp38 * tmp39
tmp41 = triton_helpers.minimum(tmp5, tmp39)
tmp42 = tl_math.abs(tmp39)
tmp43 = -tmp42
tmp44 = tl_math.exp(tmp43)
tmp45 = libdevice.log1p(tmp44)
tmp46 = tmp41 - tmp45
tmp47 = tmp40 - tmp46
tmp48 = tmp36 + tmp47
tmp49 = tl.broadcast_to(tmp48, [XBLOCK, RBLOCK])
tmp51 = tl.sum(tmp49, 1)[:, None]
tmp52 = 4.0
tmp53 = tmp51 / tmp52
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp53, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_mean_sum_0[grid(1)](
buf2, arg1_1, arg0_1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf2,
class SigmoidCrossEntropyLossNew(nn.Module):
def __init__(self, class_weights: 'Optional[Union[Tensor, List]]'=None,
**kwargs):
"""
Params:
class_weights: List or 1D tensor of length equal to number of classes.
"""
super().__init__()
if class_weights:
self.loss_fn = nn.BCEWithLogitsLoss(reduction='none',
pos_weight=torch.Tensor(class_weights))
else:
self.loss_fn = nn.BCEWithLogitsLoss(reduction='none')
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Connormcc3/ludwig
|
SigmoidCrossEntropyLoss
| false
| 9,009
|
[
"Apache-2.0"
] | 0
|
5d562cbc0c4fed3e607969e18611f34240eef177
|
https://github.com/Connormcc3/ludwig/tree/5d562cbc0c4fed3e607969e18611f34240eef177
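An eager check of what the fused kernel above computes: per-element BCE-with-logits, summed over the class dimension and then averaged over the batch. Inputs are illustrative.

import torch
import torch.nn as nn

torch.manual_seed(0)
preds = torch.rand(4, 4)
target = torch.rand(4, 4)
element = nn.BCEWithLogitsLoss(reduction='none')(preds, target)
loss = element.sum(dim=1).mean()                   # sum over classes, mean over batch
print(loss.item())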
|
MNISTBlock
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class MNISTBlock(nn.Module):
def __init__(self, width, scaling=1.0, use_bias=True):
super(MNISTBlock, self).__init__()
self.scaling = scaling
self.linear = nn.Linear(width, width, bias=use_bias)
nn.init.xavier_normal_(self.linear.weight)
self.shortcut = nn.Sequential()
def forward(self, x):
out = self.linear(x)
out = float(self.scaling) * F.relu(out)
out += self.shortcut(x)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'width': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_relu_threshold_backward_0(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 1.0
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = 0.0
tmp10 = tmp4 <= tmp9
tl.store(out_ptr0 + x2, tmp8, xmask)
tl.store(out_ptr1 + x2, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_add_mul_relu_threshold_backward_0[grid(256)](buf0,
primals_2, primals_3, buf1, buf2, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf0
del primals_2
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2
class MNISTBlockNew(nn.Module):
def __init__(self, width, scaling=1.0, use_bias=True):
super(MNISTBlockNew, self).__init__()
self.scaling = scaling
self.linear = nn.Linear(width, width, bias=use_bias)
nn.init.xavier_normal_(self.linear.weight)
self.shortcut = nn.Sequential()
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
EulerInstitute/mgopt_icml21
|
MNISTBlock
| false
| 9,010
|
[
"Apache-2.0"
] | 0
|
3790ac863e22c49e067d2872f7e3ea6e306c65af
|
https://github.com/EulerInstitute/mgopt_icml21/tree/3790ac863e22c49e067d2872f7e3ea6e306c65af
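For clarity, the block above computes out = scaling * relu(linear(x)) + x with an identity shortcut, so setting scaling to 0 turns it into a pure skip connection. A one-off eager sketch with toy shapes (not from the repo):

import torch
import torch.nn.functional as F

torch.manual_seed(0)
w, b = torch.randn(4, 4), torch.randn(4)           # stand-ins for self.linear.weight / .bias
x = torch.rand(2, 4)
scaling = 1.0
out = scaling * F.relu(F.linear(x, w, b)) + x      # residual update of the block
print(out.shape)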
|
StatsPool
|
import torch
import warnings
from typing import Optional
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
class StatsPool(nn.Module):
"""Statistics pooling
    Computes the temporal mean and (unbiased) standard deviation
    and returns their concatenation.
Reference
---------
https://en.wikipedia.org/wiki/Weighted_arithmetic_mean
"""
def forward(self, sequences: 'torch.Tensor', weights:
'Optional[torch.Tensor]'=None) ->torch.Tensor:
"""Forward pass
Parameters
----------
sequences : (batch, channel, frames) torch.Tensor
Sequences.
weights : (batch, frames) torch.Tensor, optional
When provided, compute weighted mean and standard deviation.
Returns
-------
output : (batch, 2 * channel) torch.Tensor
Concatenation of mean and (unbiased) standard deviation.
"""
if weights is None:
mean = sequences.mean(dim=2)
std = sequences.std(dim=2, unbiased=True)
else:
weights = weights.unsqueeze(dim=1)
num_frames = sequences.shape[2]
num_weights = weights.shape[2]
if num_frames != num_weights:
warnings.warn(
f'Mismatch between frames ({num_frames}) and weights ({num_weights}) numbers.'
)
weights = F.interpolate(weights, size=num_frames, mode=
'linear', align_corners=False)
v1 = weights.sum(dim=2)
mean = torch.sum(sequences * weights, dim=2) / v1
dx2 = torch.square(sequences - mean.unsqueeze(2))
v2 = torch.square(weights).sum(dim=2)
var = torch.sum(dx2 * weights, dim=2) / (v1 - v2 / v1)
std = torch.sqrt(var)
return torch.cat([mean, std], dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 8
x0 = xindex % 4
x2 = xindex // 32
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tl.load(in_ptr0 + (4 + x0 + 16 * x1 + 64 * x2), tmp4 & xmask,
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr0 + (8 + x0 + 16 * x1 + 64 * x2), tmp4 & xmask,
other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.load(in_ptr0 + (12 + x0 + 16 * x1 + 64 * x2), tmp4 & xmask,
other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp4, tmp13, tmp14)
tmp16 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp19 = tl.load(in_ptr0 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp16 &
xmask, other=0.0)
tmp20 = tl.load(in_ptr0 + (4 + x0 + 16 * (-4 + x1) + 64 * x2), tmp16 &
xmask, other=0.0)
tmp21 = tmp19 + tmp20
tmp22 = tl.load(in_ptr0 + (8 + x0 + 16 * (-4 + x1) + 64 * x2), tmp16 &
xmask, other=0.0)
tmp23 = tmp21 + tmp22
tmp24 = tl.load(in_ptr0 + (12 + x0 + 16 * (-4 + x1) + 64 * x2), tmp16 &
xmask, other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tmp25 / tmp12
tmp27 = tmp19 - tmp26
tmp28 = tmp27 * tmp27
tmp29 = tmp20 - tmp26
tmp30 = tmp29 * tmp29
tmp31 = tmp28 + tmp30
tmp32 = tmp22 - tmp26
tmp33 = tmp32 * tmp32
tmp34 = tmp31 + tmp33
tmp35 = tmp24 - tmp26
tmp36 = tmp35 * tmp35
tmp37 = tmp34 + tmp36
tmp38 = 3.0
tmp39 = tmp37 / tmp38
tmp40 = libdevice.sqrt(tmp39)
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp16, tmp40, tmp41)
tmp43 = tl.where(tmp4, tmp15, tmp42)
tl.store(out_ptr0 + x3, tmp43, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](arg0_1, buf0, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class StatsPoolNew(nn.Module):
"""Statistics pooling
    Computes the temporal mean and (unbiased) standard deviation
    and returns their concatenation.
Reference
---------
https://en.wikipedia.org/wiki/Weighted_arithmetic_mean
"""
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
FrenchKrab/pyannote-audio
|
StatsPool
| false
| 9,011
|
[
"MIT"
] | 0
|
14e3b999e3b3fa6063d6401c375a9f7a2534cb74
|
https://github.com/FrenchKrab/pyannote-audio/tree/14e3b999e3b3fa6063d6401c375a9f7a2534cb74
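Note that the compiled path above covers only the unweighted branch of the module: temporal mean and unbiased standard deviation over the frame axis, concatenated along the channel axis. A quick eager sketch with illustrative shapes:

import torch

torch.manual_seed(0)
seq = torch.rand(4, 4, 16)                         # batch x channel x frames
pooled = torch.cat([seq.mean(dim=2),
                    seq.std(dim=2, unbiased=True)], dim=1)
print(pooled.shape)                                # torch.Size([4, 8])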
|
idct_8x8
|
import itertools
import torch
import numpy as np
import torch.nn as nn
class idct_8x8(nn.Module):
""" Inverse discrete Cosine Transformation
Input:
dcp(tensor): batch x height x width
Output:
image(tensor): batch x height x width
"""
def __init__(self):
super(idct_8x8, self).__init__()
alpha = np.array([1.0 / np.sqrt(2)] + [1] * 7)
self.alpha = nn.Parameter(torch.from_numpy(np.outer(alpha, alpha)).
float())
tensor = np.zeros((8, 8, 8, 8), dtype=np.float32)
for x, y, u, v in itertools.product(range(8), repeat=4):
tensor[x, y, u, v] = np.cos((2 * u + 1) * x * np.pi / 16) * np.cos(
(2 * v + 1) * y * np.pi / 16)
self.tensor = nn.Parameter(torch.from_numpy(tensor).float())
def forward(self, image):
image = image * self.alpha
result = 0.25 * torch.tensordot(image, self.tensor, dims=2) + 128
result.view(image.shape)
return result
def get_inputs():
return [torch.rand([4, 4, 8, 8])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import itertools
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_mul_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 0.25
tmp2 = tmp0 * tmp1
tmp3 = 128.0
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (8, 8), (8, 1))
assert_size_stride(primals_2, (4, 4, 8, 8), (256, 64, 8, 1))
assert_size_stride(primals_3, (8, 8, 8, 8), (512, 64, 8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(1024)](primals_2, primals_1, buf0, 1024,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 64), (64, 1), 0),
reinterpret_tensor(primals_3, (64, 64), (64, 1), 0), out=buf1)
buf2 = reinterpret_tensor(buf1, (4, 4, 8, 8), (256, 64, 8, 1), 0)
del buf1
triton_poi_fused_add_mul_1[grid(1024)](buf2, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
return buf2, primals_2, reinterpret_tensor(buf0, (64, 16), (1, 64), 0
), reinterpret_tensor(primals_3, (64, 64), (1, 64), 0)
class idct_8x8New(nn.Module):
""" Inverse discrete Cosine Transformation
Input:
dcp(tensor): batch x height x width
Output:
image(tensor): batch x height x width
"""
def __init__(self):
super(idct_8x8New, self).__init__()
alpha = np.array([1.0 / np.sqrt(2)] + [1] * 7)
self.alpha = nn.Parameter(torch.from_numpy(np.outer(alpha, alpha)).
float())
tensor = np.zeros((8, 8, 8, 8), dtype=np.float32)
for x, y, u, v in itertools.product(range(8), repeat=4):
tensor[x, y, u, v] = np.cos((2 * u + 1) * x * np.pi / 16) * np.cos(
(2 * v + 1) * y * np.pi / 16)
self.tensor = nn.Parameter(torch.from_numpy(tensor).float())
def forward(self, input_0):
primals_1 = self.alpha
primals_3 = self.tensor
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
DazhiZhong/DiffJPEG
|
idct_8x8
| false
| 9,012
|
[
"MIT"
] | 0
|
e20de92539f31a57906ae4c32a41dc46e774c316
|
https://github.com/DazhiZhong/DiffJPEG/tree/e20de92539f31a57906ae4c32a41dc46e774c316
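An eager sketch of the two kernels above: scale the coefficients by the alpha outer product, contract the last two dimensions against the 8x8x8x8 cosine basis, then apply 0.25 * (...) + 128. The constants mirror the module's __init__; the input size is illustrative.

import itertools
import numpy as np
import torch

alpha = np.outer([1.0 / np.sqrt(2)] + [1] * 7,
                 [1.0 / np.sqrt(2)] + [1] * 7).astype(np.float32)
basis = np.zeros((8, 8, 8, 8), dtype=np.float32)
for x, y, u, v in itertools.product(range(8), repeat=4):
    basis[x, y, u, v] = np.cos((2 * u + 1) * x * np.pi / 16) * np.cos(
        (2 * v + 1) * y * np.pi / 16)
dcp = torch.rand(4, 4, 8, 8)                       # DCT coefficient blocks
out = 0.25 * torch.tensordot(dcp * torch.from_numpy(alpha),
                             torch.from_numpy(basis), dims=2) + 128
print(out.shape)                                   # torch.Size([4, 4, 8, 8])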
|