| entry_point | original_triton_python_code | optimised_triton_code | repo_name | module_name | synthetic | uuid | licenses | stars | sha | repo_link |
|---|---|---|---|---|---|---|---|---|---|---|
BhattacharyyaDistance
|
import torch
import torch.nn as nn
class BhattacharyyaDistance(nn.Module):
def __init__(self):
super(BhattacharyyaDistance, self).__init__()
def forward(self, hist1, hist2):
bh_dist = torch.sqrt(hist1 * hist2).sum()
return bh_dist
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mul_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel
):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 * tmp1
tmp3 = libdevice.sqrt(tmp2)
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp6, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_mul_sqrt_sum_0[grid(1)](arg0_1, arg1_1, buf0, 1,
256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class BhattacharyyaDistanceNew(nn.Module):
def __init__(self):
super(BhattacharyyaDistanceNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
tommy90191/Find_Tiny_but_Important_Image_Changes
|
BhattacharyyaDistance
| false
| 4,438
|
[
"MIT"
] | 0
|
429d679606f96f32db4cddf167a9cfb963d3df26
|
https://github.com/tommy90191/Find_Tiny_but_Important_Image_Changes/tree/429d679606f96f32db4cddf167a9cfb963d3df26
|
l1normalization
|
import torch
import torch.nn as nn
class l1normalization(nn.Module):
def __init__(self, scale):
super(l1normalization, self).__init__()
self.scale = scale
def forward(self, x, dim=1):
return self.scale * x * x.pow(1).sum(dim).clamp(min=1e-12).rsqrt(
).expand_as(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'scale': 1.0}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp9 = tmp7 + tmp8
tmp10 = 1e-12
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp12 = libdevice.rsqrt(tmp11)
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class l1normalizationNew(nn.Module):
def __init__(self, scale):
super(l1normalizationNew, self).__init__()
self.scale = scale
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
tommy90191/Find_Tiny_but_Important_Image_Changes
|
l1normalization
| false
| 4,439
|
[
"MIT"
] | 0
|
429d679606f96f32db4cddf167a9cfb963d3df26
|
https://github.com/tommy90191/Find_Tiny_but_Important_Image_Changes/tree/429d679606f96f32db4cddf167a9cfb963d3df26
|
Conv2dWithConstraint
|
import torch
from torch import nn
class Conv2dWithConstraint(nn.Conv2d):
def __init__(self, *args, max_norm=1, **kwargs):
self.max_norm = max_norm
super(Conv2dWithConstraint, self).__init__(*args, **kwargs)
def forward(self, x):
self.weight.data = torch.renorm(self.weight.data, p=2, dim=0,
maxnorm=self.max_norm)
return super(Conv2dWithConstraint, self).forward(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_renorm_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl
.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp7 = 1.0
tmp8 = tmp6 > tmp7
tmp9 = 1e-07
tmp10 = tmp6 + tmp9
tmp11 = tl.full([1, 1], 1, tl.int32)
tmp12 = tmp11 / tmp10
tmp13 = tmp12 * tmp7
tmp14 = tl.where(tmp8, tmp13, tmp7)
tmp15 = tmp0 * tmp14
tl.store(out_ptr1 + (r1 + 64 * x0), tmp15, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_renorm_0[grid(4)](primals_1, buf1, 4, 64, XBLOCK=1,
num_warps=2, num_stages=1)
buf2 = extern_kernels.convolution(primals_3, buf1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
triton_poi_fused_convolution_1[grid(16)](buf2, primals_2, buf3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
buf4 = torch.ops.aten.set_.source_Tensor(primals_1, buf1)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
del buf2
del primals_1
return buf3, primals_3, buf1
class Conv2dWithConstraintNew(nn.Conv2d):
def __init__(self, *args, max_norm=1, **kwargs):
self.max_norm = max_norm
super(Conv2dWithConstraintNew, self).__init__(*args, **kwargs)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
tomMoral/braindecode
|
Conv2dWithConstraint
| false
| 4,440
|
[
"BSD-3-Clause"
] | 0
|
09d63b7e32fdfcfbaac7569a003f2611721a78ca
|
https://github.com/tomMoral/braindecode/tree/09d63b7e32fdfcfbaac7569a003f2611721a78ca
|
KLCoefficient
|
import torch
import torch.nn as nn
from torch.nn import functional as F
class KLCoefficient(nn.Module):
def __init__(self):
super(KLCoefficient, self).__init__()
def forward(self, hist1, hist2):
kl = F.kl_div(hist1, hist2)
dist = 1.0 / 1 + kl
return dist
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_mul_sub_xlogy_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp9 = tl.load(in_ptr1 + r0, None)
tmp1 = libdevice.isnan(tmp0).to(tl.int1)
tmp2 = 0.0
tmp3 = tmp0 == tmp2
tmp4 = tl_math.log(tmp0)
tmp5 = tmp0 * tmp4
tmp6 = tl.where(tmp3, tmp2, tmp5)
tmp7 = float('nan')
tmp8 = tl.where(tmp1, tmp7, tmp6)
tmp10 = tmp0 * tmp9
tmp11 = tmp8 - tmp10
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = 256.0
tmp16 = tmp14 / tmp15
tmp17 = 1.0
tmp18 = tmp16 + tmp17
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_mean_mul_sub_xlogy_0[grid(1)](buf1, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class KLCoefficientNew(nn.Module):
def __init__(self):
super(KLCoefficientNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
tommy90191/Find_Tiny_but_Important_Image_Changes
|
KLCoefficient
| false
| 4,441
|
[
"MIT"
] | 0
|
429d679606f96f32db4cddf167a9cfb963d3df26
|
https://github.com/tommy90191/Find_Tiny_but_Important_Image_Changes/tree/429d679606f96f32db4cddf167a9cfb963d3df26
|
ConstractiveLoss
|
import torch
import numpy as np
import torch.nn as nn
from torch.nn import functional as F
class ConstractiveLoss(nn.Module):
def __init__(self, margin=2.0, dist_flag='l2'):
super(ConstractiveLoss, self).__init__()
self.margin = margin
self.dist_flag = dist_flag
def various_distance(self, out_vec_t0, out_vec_t1):
if self.dist_flag == 'l2':
distance = F.pairwise_distance(out_vec_t0, out_vec_t1, p=2)
if self.dist_flag == 'l1':
distance = F.pairwise_distance(out_vec_t0, out_vec_t1, p=1)
if self.dist_flag == 'cos':
similarity = F.cosine_similarity(out_vec_t0, out_vec_t1)
distance = 1 - 2 * similarity / np.pi
return distance
def forward(self, out_vec_t0, out_vec_t1, label):
distance = self.various_distance(out_vec_t0, out_vec_t1)
constractive_loss = torch.sum((1 - label) * torch.pow(distance, 2) +
label * torch.pow(torch.clamp(self.margin - distance, min=0.0), 2))
return constractive_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
import torch.nn as nn
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_norm_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 - tmp1
tmp3 = 1e-06
tmp4 = tmp2 + tmp3
tmp5 = tmp4 * tmp4
tmp8 = tmp6 - tmp7
tmp9 = tmp8 + tmp3
tmp10 = tmp9 * tmp9
tmp11 = tmp5 + tmp10
tmp14 = tmp12 - tmp13
tmp15 = tmp14 + tmp3
tmp16 = tmp15 * tmp15
tmp17 = tmp11 + tmp16
tmp20 = tmp18 - tmp19
tmp21 = tmp20 + tmp3
tmp22 = tmp21 * tmp21
tmp23 = tmp17 + tmp22
tmp24 = libdevice.sqrt(tmp23)
tl.store(out_ptr0 + x0, tmp24, xmask)
@triton.jit
def triton_per_fused_add_clamp_mul_pow_rsub_sum_1(in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
r0 = rindex % 64
tmp0 = tl.load(in_ptr0 + r2, None)
tmp3 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp3 * tmp3
tmp5 = tmp2 * tmp4
tmp6 = 2.0
tmp7 = tmp6 - tmp3
tmp8 = 0.0
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = tmp9 * tmp9
tmp11 = tmp0 * tmp10
tmp12 = tmp5 + tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp15, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_norm_sub_0[grid(64)](arg1_1, arg0_1, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_add_clamp_mul_pow_rsub_sum_1[grid(1)](arg2_1, buf0,
buf1, 1, 256, num_warps=2, num_stages=1)
del arg2_1
del buf0
return buf1,
class ConstractiveLossNew(nn.Module):
def __init__(self, margin=2.0, dist_flag='l2'):
super(ConstractiveLossNew, self).__init__()
self.margin = margin
self.dist_flag = dist_flag
def various_distance(self, out_vec_t0, out_vec_t1):
if self.dist_flag == 'l2':
distance = F.pairwise_distance(out_vec_t0, out_vec_t1, p=2)
if self.dist_flag == 'l1':
distance = F.pairwise_distance(out_vec_t0, out_vec_t1, p=1)
if self.dist_flag == 'cos':
similarity = F.cosine_similarity(out_vec_t0, out_vec_t1)
distance = 1 - 2 * similarity / np.pi
return distance
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
tommy90191/Find_Tiny_but_Important_Image_Changes
|
ConstractiveLoss
| false
| 4,442
|
[
"MIT"
] | 0
|
429d679606f96f32db4cddf167a9cfb963d3df26
|
https://github.com/tommy90191/Find_Tiny_but_Important_Image_Changes/tree/429d679606f96f32db4cddf167a9cfb963d3df26
|
l2normalization
|
import torch
import torch.nn as nn
class l2normalization(nn.Module):
def __init__(self, scale):
super(l2normalization, self).__init__()
self.scale = scale
def forward(self, x, dim=1):
"""out = scale * x / sqrt(\\sum x_i^2)"""
return self.scale * x * x.pow(2).sum(dim).clamp(min=1e-12).rsqrt(
).expand_as(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'scale': 1.0}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = 1e-12
tmp15 = triton_helpers.maximum(tmp13, tmp14)
tmp16 = libdevice.rsqrt(tmp15)
tmp17 = tmp2 * tmp16
tl.store(out_ptr0 + x3, tmp17, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class l2normalizationNew(nn.Module):
def __init__(self, scale):
super(l2normalizationNew, self).__init__()
self.scale = scale
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
tommy90191/Find_Tiny_but_Important_Image_Changes
|
l2normalization
| false
| 4,443
|
[
"MIT"
] | 0
|
429d679606f96f32db4cddf167a9cfb963d3df26
|
https://github.com/tommy90191/Find_Tiny_but_Important_Image_Changes/tree/429d679606f96f32db4cddf167a9cfb963d3df26
|
scale_feature
|
import torch
import torch.nn as nn
class scale_feature(nn.Module):
def __init__(self, scale):
super(scale_feature, self).__init__()
self.scale = scale
def forward(self, x):
return self.scale * x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'scale': 1.0}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class scale_featureNew(nn.Module):
def __init__(self, scale):
super(scale_featureNew, self).__init__()
self.scale = scale
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
tommy90191/Find_Tiny_but_Important_Image_Changes
|
scale_feature
| false
| 4,444
|
[
"MIT"
] | 0
|
429d679606f96f32db4cddf167a9cfb963d3df26
|
https://github.com/tommy90191/Find_Tiny_but_Important_Image_Changes/tree/429d679606f96f32db4cddf167a9cfb963d3df26
|
DQFFN
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class DQFFN(nn.Module):
def __init__(self, n):
"""
Create Feed-forward Network with n dim input and n dim output
"""
super(DQFFN, self).__init__()
self.n = n
self.l1 = nn.Linear(n * (n + 1) // 2, 2048)
self.l2 = nn.Linear(2048, 1024)
self.l3 = nn.Linear(1024, n)
def forward(self, x):
"""
input is of shape (batch_size, n, n)
"""
upper_indices = torch.triu_indices(self.n, self.n)
x = x[:, upper_indices[0], upper_indices[1]]
x = F.relu(self.l1(x))
x = F.relu(self.l2(x))
return self.l3(x)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_triu_indices_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 20
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 10, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp0.to(tl.float64)
tmp6 = tl.full([1], 2.0, tl.float64)
tmp7 = tmp5 * tmp6
tmp8 = tl.full([1], 20.25, tl.float64)
tmp9 = tmp8 - tmp7
tmp10 = libdevice.sqrt(tmp9)
tmp11 = tl.full([1], 4.5, tl.float64)
tmp12 = tmp11 - tmp10
tmp13 = libdevice.floor(tmp12)
tmp14 = tmp13.to(tl.int64)
tmp15 = tmp14 + tmp1
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp4, tmp15, tmp16)
tmp18 = tmp0 >= tmp3
tl.full([1], 20, tl.int64)
tmp21 = -10 + x0
tmp22 = tmp21.to(tl.float64)
tmp23 = tmp22 * tmp6
tmp24 = tmp8 - tmp23
tmp25 = libdevice.sqrt(tmp24)
tmp26 = tmp11 - tmp25
tmp27 = libdevice.floor(tmp26)
tmp28 = tl.full([1], 7.0, tl.float64)
tmp29 = tmp28 - tmp27
tmp30 = tmp29 * tmp27
tmp31 = tl.full([1], 0.5, tl.float64)
tmp32 = tmp30 * tmp31
tmp33 = tmp22 - tmp32
tmp34 = libdevice.floor(tmp33)
tmp35 = tmp34.to(tl.int64)
tmp36 = tmp35 + tmp1
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp18, tmp36, tmp37)
tmp39 = tl.where(tmp4, tmp17, tmp38)
tl.store(out_ptr0 + x0, tmp39, xmask)
@triton.jit
def triton_poi_fused_index_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 40
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 10
x1 = xindex // 10
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (10 + x0), xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask,
'index out of bounds: 0 <= tmp4 < 4')
tmp7 = tmp6 + tmp1
tmp8 = tmp6 < 0
tmp9 = tl.where(tmp8, tmp7, tmp6)
tl.device_assert((0 <= tmp9) & (tmp9 < 4) | ~xmask,
'index out of bounds: 0 <= tmp9 < 4')
tmp11 = tl.load(in_ptr1 + (tmp9 + 4 * tmp4 + 16 * x1), xmask,
eviction_policy='evict_last')
tl.store(out_ptr0 + x2, tmp11, xmask)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (2048, 10), (10, 1))
assert_size_stride(primals_3, (2048,), (1,))
assert_size_stride(primals_4, (1024, 2048), (2048, 1))
assert_size_stride(primals_5, (1024,), (1,))
assert_size_stride(primals_6, (4, 1024), (1024, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((20,), (1,), torch.int64)
get_raw_stream(0)
triton_poi_fused_triu_indices_0[grid(20)](buf0, 20, XBLOCK=32,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
triton_poi_fused_index_1[grid(40)](buf0, primals_1, buf1, 40,
XBLOCK=64, num_warps=1, num_stages=1)
del buf0
del primals_1
buf2 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_2, (10, 2048), (
1, 10), 0), out=buf2)
del primals_2
buf3 = buf2
del buf2
triton_poi_fused_relu_2[grid(8192)](buf3, primals_3, 8192, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_3
buf4 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
extern_kernels.mm(buf3, reinterpret_tensor(primals_4, (2048, 1024),
(1, 2048), 0), out=buf4)
buf5 = buf4
del buf4
triton_poi_fused_relu_3[grid(4096)](buf5, primals_5, 4096, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, buf5, reinterpret_tensor(primals_6,
(1024, 4), (1, 1024), 0), alpha=1, beta=1, out=buf6)
del primals_7
return buf6, buf1, buf3, buf5, primals_6, primals_4
class DQFFNNew(nn.Module):
def __init__(self, n):
"""
Create Feed-forward Network with n dim input and n dim output
"""
super(DQFFNNew, self).__init__()
self.n = n
self.l1 = nn.Linear(n * (n + 1) // 2, 2048)
self.l2 = nn.Linear(2048, 1024)
self.l3 = nn.Linear(1024, n)
def forward(self, input_0):
primals_2 = self.l1.weight
primals_3 = self.l1.bias
primals_4 = self.l2.weight
primals_5 = self.l2.bias
primals_6 = self.l3.weight
primals_7 = self.l3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
thomashopkins32/RedBlueGame
|
DQFFN
| false
| 4,445
|
[
"MIT"
] | 0
|
dd3e759123acc02375fdfcc504892e00e6b31ef1
|
https://github.com/thomashopkins32/RedBlueGame/tree/dd3e759123acc02375fdfcc504892e00e6b31ef1
|
L2Norm
|
import torch
from math import sqrt as sqrt
from itertools import product as product
import torch.nn as nn
import torch.nn.init as init
class L2Norm(nn.Module):
def __init__(self, n_channels, scale):
super(L2Norm, self).__init__()
self.n_channels = n_channels
self.gamma = scale or None
self.eps = 1e-10
self.weight = nn.Parameter(torch.Tensor(self.n_channels))
self.reset_parameters()
def reset_parameters(self):
init.constant_(self.weight, self.gamma)
def forward(self, x):
norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps
x /= norm
out = self.weight.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(x
) * x
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_channels': 4, 'scale': 1.0}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from math import sqrt as sqrt
from itertools import product as product
import torch.nn as nn
import torch.nn.init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-10
tmp14 = tmp12 + tmp13
tmp15 = tmp0 / tmp14
tmp17 = tmp16 * tmp15
tl.store(out_ptr0 + x3, tmp15, xmask)
tl.store(out_ptr1 + x3, tmp17, xmask)
tl.store(out_ptr2 + x3, tmp15, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mul_pow_sqrt_sum_0[grid(256)](primals_1,
primals_2, buf0, buf1, primals_1, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_1
del primals_2
return buf1, buf0
class L2NormNew(nn.Module):
def __init__(self, n_channels, scale):
super(L2NormNew, self).__init__()
self.n_channels = n_channels
self.gamma = scale or None
self.eps = 1e-10
self.weight = nn.Parameter(torch.Tensor(self.n_channels))
self.reset_parameters()
def reset_parameters(self):
init.constant_(self.weight, self.gamma)
def forward(self, input_0):
primals_2 = self.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
|
tomgause/pytorch-ssd
|
L2Norm
| false
| 4,446
|
[
"MIT"
] | 0
|
e458d4319deb21c8970bcce13382e7ada70ea1a2
|
https://github.com/tomgause/pytorch-ssd/tree/e458d4319deb21c8970bcce13382e7ada70ea1a2
|
FeatureCorrelation
|
import torch
import torch.nn as nn
class FeatureCorrelation(nn.Module):
def __init__(self, scale):
super(FeatureCorrelation, self).__init__()
self.scale = scale
def forward(self, feature_A, feature_B):
b, c, h, w = feature_A.size()
feature_A = feature_A.transpose(2, 3).contiguous().view(b, c, h * w)
feature_B = feature_B.view(b, c, h * w).transpose(1, 2)
feature_mul = torch.bmm(feature_B, feature_A)
correlation_tensor = self.scale * feature_mul.view(b, h, w, h * w
).transpose(2, 3).transpose(1, 2)
return correlation_tensor
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'scale': 1.0}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_mul_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64, 4)](arg0_1, buf0, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg1_1, (4, 16, 4), (64, 1,
16), 0), reinterpret_tensor(buf0, (4, 4, 16), (64, 16, 1), 0),
out=buf1)
del arg1_1
del buf0
buf2 = reinterpret_tensor(buf1, (4, 16, 4, 4), (256, 1, 64, 16), 0)
del buf1
triton_poi_fused_mul_1[grid(1024)](buf2, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
return buf2,
class FeatureCorrelationNew(nn.Module):
def __init__(self, scale):
super(FeatureCorrelationNew, self).__init__()
self.scale = scale
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
tommy90191/Find_Tiny_but_Important_Image_Changes
|
FeatureCorrelation
| false
| 4,447
|
[
"MIT"
] | 0
|
429d679606f96f32db4cddf167a9cfb963d3df26
|
https://github.com/tommy90191/Find_Tiny_but_Important_Image_Changes/tree/429d679606f96f32db4cddf167a9cfb963d3df26
|
ConstractiveThresholdHingeLoss
|
import torch
import torch.nn as nn
from torch.nn import functional as F
class ConstractiveThresholdHingeLoss(nn.Module):
def __init__(self, hingethresh=0.0, margin=2.0):
super(ConstractiveThresholdHingeLoss, self).__init__()
self.threshold = hingethresh
self.margin = margin
def forward(self, out_vec_t0, out_vec_t1, label):
distance = F.pairwise_distance(out_vec_t0, out_vec_t1, p=2)
similar_pair = torch.clamp(distance - self.threshold, min=0.0)
dissimilar_pair = torch.clamp(self.margin - distance, min=0.0)
constractive_thresh_loss = torch.sum((1 - label) * torch.pow(
similar_pair, 2) + label * torch.pow(dissimilar_pair, 2))
return constractive_thresh_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_norm_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 - tmp1
tmp3 = 1e-06
tmp4 = tmp2 + tmp3
tmp5 = tmp4 * tmp4
tmp8 = tmp6 - tmp7
tmp9 = tmp8 + tmp3
tmp10 = tmp9 * tmp9
tmp11 = tmp5 + tmp10
tmp14 = tmp12 - tmp13
tmp15 = tmp14 + tmp3
tmp16 = tmp15 * tmp15
tmp17 = tmp11 + tmp16
tmp20 = tmp18 - tmp19
tmp21 = tmp20 + tmp3
tmp22 = tmp21 * tmp21
tmp23 = tmp17 + tmp22
tmp24 = libdevice.sqrt(tmp23)
tl.store(out_ptr0 + x0, tmp24, xmask)
@triton.jit
def triton_per_fused_add_clamp_mul_pow_rsub_sub_sum_1(in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
r0 = rindex % 64
tmp0 = tl.load(in_ptr0 + r2, None)
tmp3 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = 0.0
tmp5 = tmp3 - tmp4
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp6 * tmp6
tmp8 = tmp2 * tmp7
tmp9 = 2.0
tmp10 = tmp9 - tmp3
tmp11 = triton_helpers.maximum(tmp10, tmp4)
tmp12 = tmp11 * tmp11
tmp13 = tmp0 * tmp12
tmp14 = tmp8 + tmp13
tmp15 = tl.broadcast_to(tmp14, [RBLOCK])
tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_norm_sub_0[grid(64)](arg1_1, arg0_1, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_add_clamp_mul_pow_rsub_sub_sum_1[grid(1)](arg2_1,
buf0, buf1, 1, 256, num_warps=2, num_stages=1)
del arg2_1
del buf0
return buf1,
class ConstractiveThresholdHingeLossNew(nn.Module):
def __init__(self, hingethresh=0.0, margin=2.0):
super(ConstractiveThresholdHingeLossNew, self).__init__()
self.threshold = hingethresh
self.margin = margin
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
tommy90191/Find_Tiny_but_Important_Image_Changes
|
ConstractiveThresholdHingeLoss
| false
| 4,448
|
[
"MIT"
] | 0
|
429d679606f96f32db4cddf167a9cfb963d3df26
|
https://github.com/tommy90191/Find_Tiny_but_Important_Image_Changes/tree/429d679606f96f32db4cddf167a9cfb963d3df26
|
ResNetBlock
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ResNetBlock(nn.Module):
def __init__(self, in_channels: 'int', out_channels: 'int',
hid_channels: 'int', bias: 'bool'):
super().__init__()
self.shortcut = in_channels != out_channels
self.conv_0 = nn.Conv2d(in_channels, hid_channels, 3, stride=1,
padding=1)
self.conv_1 = nn.Conv2d(hid_channels, out_channels, 3, stride=1,
padding=1, bias=bias)
if self.shortcut:
self.conv_shortcut = nn.Conv2d(in_channels, out_channels, 1,
stride=1, bias=False)
def forward(self, x):
xs = x if not self.shortcut else self.conv_shortcut(x)
x = F.leaky_relu(self.conv_0(x), 0.2)
x = F.leaky_relu(self.conv_1(x), 0.2)
return xs + 0.1 * x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'hid_channels': 4,
'bias': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_mul_1(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp6 = 0.2
tmp7 = tmp2 * tmp6
tmp8 = tl.where(tmp4, tmp2, tmp7)
tmp9 = 0.1
tmp10 = tmp8 * tmp9
tmp11 = tmp5 + tmp10
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp11, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(256)](buf0,
primals_3, buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf5 = buf0
del buf0
triton_poi_fused_add_convolution_leaky_relu_mul_1[grid(256)](buf3,
primals_5, primals_1, buf4, buf5, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf3
del primals_5
return buf5, primals_1, primals_2, primals_4, buf1, buf2, buf4
class ResNetBlockNew(nn.Module):
def __init__(self, in_channels: 'int', out_channels: 'int',
hid_channels: 'int', bias: 'bool'):
super().__init__()
self.shortcut = in_channels != out_channels
self.conv_0 = nn.Conv2d(in_channels, hid_channels, 3, stride=1,
padding=1)
self.conv_1 = nn.Conv2d(hid_channels, out_channels, 3, stride=1,
padding=1, bias=bias)
if self.shortcut:
self.conv_shortcut = nn.Conv2d(in_channels, out_channels, 1,
stride=1, bias=False)
def forward(self, input_0):
primals_2 = self.conv_0.weight
primals_3 = self.conv_0.bias
primals_4 = self.conv_1.weight
primals_5 = self.conv_1.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
tmralmeida/VGAN
|
ResNetBlock
| false
| 4,449
|
[
"MIT"
] | 0
|
103d2e7ac0b84b08ff3c3a40e0ccb16390b1e008
|
https://github.com/tmralmeida/VGAN/tree/103d2e7ac0b84b08ff3c3a40e0ccb16390b1e008
|
Affine
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
class Affine(nn.Module):
def __init__(self, dim):
super().__init__()
self.alpha = nn.Parameter(torch.ones((1, 1, dim)))
self.beta = nn.Parameter(torch.zeros((1, 1, dim)))
def forward(self, x):
return torch.addcmul(self.beta, self.alpha, x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_addcmul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + x2, xmask)
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp5 = tmp3 * tmp4
tmp6 = tmp0 + tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 1, 4), (4, 4, 1))
assert_size_stride(primals_2, (1, 1, 4), (4, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_addcmul_0[grid(256)](primals_1, primals_2,
primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf0, primals_3
class AffineNew(nn.Module):
def __init__(self, dim):
super().__init__()
self.alpha = nn.Parameter(torch.ones((1, 1, dim)))
self.beta = nn.Parameter(torch.zeros((1, 1, dim)))
def forward(self, input_0):
primals_1 = self.alpha
primals_2 = self.beta
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
tor4z/pytorch-image-models
|
Affine
| false
| 4,450
|
[
"Apache-2.0"
] | 0
|
d7bab8a6c52a72487d1bed0a28aad41e326d7622
|
https://github.com/tor4z/pytorch-image-models/tree/d7bab8a6c52a72487d1bed0a28aad41e326d7622
|
L1
|
import torch
import torch.nn as nn
class L1(nn.Module):
def __init__(self):
super(L1, self).__init__()
def forward(self, output, target):
lossvalue = torch.abs(output[:, None] - target).mean()
return lossvalue
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex % 64
r2 = rindex // 256
r3 = rindex % 256
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp1 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 1024.0
tmp8 = tmp6 / tmp7
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_mean_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1,
1024, num_warps=8, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class L1New(nn.Module):
def __init__(self):
super(L1New, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
tomrunia/flownet2-pytorch
|
L1
| false
| 4,451
|
[
"Apache-2.0"
] | 0
|
759b09c375348cf64f52f914cf3bf3e9095cc959
|
https://github.com/tomrunia/flownet2-pytorch/tree/759b09c375348cf64f52f914cf3bf3e9095cc959
|
L2
|
import torch
import torch.nn as nn
class L2(nn.Module):
def __init__(self):
super(L2, self).__init__()
def forward(self, output, target):
lossvalue = torch.norm(output[:, None] - target, p=2, dim=1).mean()
return lossvalue
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_linalg_vector_norm_mean_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
r0 = rindex % 64
tmp0 = tl.load(in_ptr0 + r2, None)
tmp1 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (64 + r0), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (128 + r0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (192 + r0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp5 = tmp0 - tmp4
tmp6 = tmp5 * tmp5
tmp7 = tmp3 + tmp6
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp7 + tmp10
tmp13 = tmp0 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tmp11 + tmp14
tmp16 = libdevice.sqrt(tmp15)
tmp17 = tl.broadcast_to(tmp16, [RBLOCK])
tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0))
tmp20 = 256.0
tmp21 = tmp19 / tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_linalg_vector_norm_mean_sub_0[grid(1)](buf1,
arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class L2New(nn.Module):
def __init__(self):
super(L2New, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
tomrunia/flownet2-pytorch
|
L2
| false
| 4,452
|
[
"Apache-2.0"
] | 0
|
759b09c375348cf64f52f914cf3bf3e9095cc959
|
https://github.com/tomrunia/flownet2-pytorch/tree/759b09c375348cf64f52f914cf3bf3e9095cc959
|
AvgConsensus
|
import torch
import torch.nn as nn
class AvgConsensus(nn.Module):
"""Average consensus module.
Args:
dim (int): Decide which dim consensus function to apply.
Default: 1.
"""
def __init__(self, dim=1):
super().__init__()
self.dim = dim
def forward(self, x):
"""Defines the computation performed at every call."""
return x.mean(dim=self.dim, keepdim=True)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class AvgConsensusNew(nn.Module):
"""Average consensus module.
Args:
dim (int): Decide which dim consensus function to apply.
Default: 1.
"""
def __init__(self, dim=1):
super().__init__()
self.dim = dim
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
scenarios/dev
|
AvgConsensus
| false
| 4,453
|
[
"Apache-2.0"
] | 0
|
9f91ebc142cea1c31231d233571ad59460ab6fba
|
https://github.com/scenarios/dev/tree/9f91ebc142cea1c31231d233571ad59460ab6fba
|
WeightNet
|
import torch
import torch.nn as nn
class WeightNet(nn.Module):
"""WeightNet in Temporal interlace module.
The WeightNet consists of two parts: one convolution layer
and a sigmoid function. Following the convolution layer, the sigmoid
function and rescale module can scale our output to the range (0, 2).
Here we set the initial bias of the convolution layer to 0, and the
final initial output will be 1.0.
Args:
in_channels (int): Channel num of input features.
groups (int): Number of groups for fc layer outputs.
"""
def __init__(self, in_channels, groups):
super().__init__()
self.sigmoid = nn.Sigmoid()
self.groups = groups
self.conv = nn.Conv1d(in_channels, groups, 3, padding=1)
self.init_weights()
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
self.conv.bias.data[...] = 0
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
n, _, t = x.shape
x = self.conv(x)
x = x.view(n, self.groups, t)
x = x.permute(0, 2, 1)
x = 2 * self.sigmoid(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'groups': 1}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tmp5 = 2.0
tmp6 = tmp4 * tmp5
tl.store(in_out_ptr0 + x0, tmp3, xmask)
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 4, 3), (12, 3, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,),
padding=(1,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4), (4, 4, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_mul_sigmoid_0[grid(16)](buf1,
primals_3, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
return buf2, primals_1, primals_2, buf1
class WeightNetNew(nn.Module):
"""WeightNet in Temporal interlace module.
The WeightNet consists of two parts: one convolution layer
and a sigmoid function. Following the convolution layer, the sigmoid
function and rescale module can scale our output to the range (0, 2).
Here we set the initial bias of the convolution layer to 0, and the
final initial output will be 1.0.
Args:
in_channels (int): Channel num of input features.
groups (int): Number of groups for fc layer outputs.
"""
def __init__(self, in_channels, groups):
super().__init__()
self.sigmoid = nn.Sigmoid()
self.groups = groups
self.conv = nn.Conv1d(in_channels, groups, 3, padding=1)
self.init_weights()
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
self.conv.bias.data[...] = 0
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
scenarios/dev
|
WeightNet
| false
| 4,454
|
[
"Apache-2.0"
] | 0
|
9f91ebc142cea1c31231d233571ad59460ab6fba
|
https://github.com/scenarios/dev/tree/9f91ebc142cea1c31231d233571ad59460ab6fba
|
BinaryLogisticRegressionLoss
|
import torch
import torch.nn as nn
def binary_logistic_regression_loss(reg_score, label, threshold=0.5,
ratio_range=(1.05, 21), eps=1e-05):
"""Binary Logistic Regression Loss."""
label = label.view(-1)
reg_score = reg_score.contiguous().view(-1)
pmask = (label > threshold).float()
num_positive = max(torch.sum(pmask), 1)
num_entries = len(label)
ratio = num_entries / num_positive
ratio = min(max(ratio, ratio_range[0]), ratio_range[1])
coef_0 = 0.5 * ratio / (ratio - 1)
coef_1 = 0.5 * ratio
loss = coef_1 * pmask * torch.log(reg_score + eps) + coef_0 * (1.0 - pmask
) * torch.log(1.0 - reg_score + eps)
loss = -torch.mean(loss)
return loss
class BinaryLogisticRegressionLoss(nn.Module):
"""Binary Logistic Regression Loss.
It will calculate binary logistic regression loss given reg_score and
label.
"""
def forward(self, reg_score, label, threshold=0.5, ratio_range=(1.05,
21), eps=1e-05):
"""Calculate Binary Logistic Regression Loss.
Args:
reg_score (torch.Tensor): Predicted score by model.
label (torch.Tensor): Groundtruth labels.
threshold (float): Threshold for positive instances.
Default: 0.5.
ratio_range (tuple): Lower bound and upper bound for ratio.
Default: (1.05, 21)
eps (float): Epsilon for small value. Default: 1e-5.
Returns:
torch.Tensor: Returned binary logistic loss.
"""
return binary_logistic_regression_loss(reg_score, label, threshold,
ratio_range, eps)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
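# Single-program reduction over all 256 elements: builds pmask = (label > 0.5), derives the
# clamped positive ratio, combines the two weighted log terms and writes the negated mean as the scalar loss.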
@triton.jit
def triton_per_fused__to_copy_add_clamp_div_gt_log_mean_mul_neg_reciprocal_rsub_sub_sum_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp19 = tl.load(in_ptr1 + r0, None)
tmp1 = 0.5
tmp2 = tmp0 > tmp1
tmp3 = tmp2.to(tl.float32)
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 1.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tl.full([1], 1, tl.int32)
tmp10 = tmp9 / tmp8
tmp11 = 256.0
tmp12 = tmp10 * tmp11
tmp13 = 1.05
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = 21.0
tmp16 = triton_helpers.minimum(tmp14, tmp15)
tmp17 = tmp16 * tmp1
tmp18 = tmp17 * tmp3
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = tl_math.log(tmp21)
tmp23 = tmp18 * tmp22
tmp24 = tmp16 - tmp7
tmp25 = tmp17 / tmp24
tmp26 = tmp7 - tmp3
tmp27 = tmp25 * tmp26
tmp28 = tmp7 - tmp19
tmp29 = tmp28 + tmp20
tmp30 = tl_math.log(tmp29)
tmp31 = tmp27 * tmp30
tmp32 = tmp23 + tmp31
tmp33 = tl.broadcast_to(tmp32, [RBLOCK])
tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0))
tmp36 = tmp35 / tmp11
tmp37 = -tmp36
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp37, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
get_raw_stream(0)
triton_per_fused__to_copy_add_clamp_div_gt_log_mean_mul_neg_reciprocal_rsub_sub_sum_0[
grid(1)](buf3, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf3,
def binary_logistic_regression_loss(reg_score, label, threshold=0.5,
ratio_range=(1.05, 21), eps=1e-05):
"""Binary Logistic Regression Loss."""
label = label.view(-1)
reg_score = reg_score.contiguous().view(-1)
pmask = (label > threshold).float()
num_positive = max(torch.sum(pmask), 1)
num_entries = len(label)
ratio = num_entries / num_positive
ratio = min(max(ratio, ratio_range[0]), ratio_range[1])
coef_0 = 0.5 * ratio / (ratio - 1)
coef_1 = 0.5 * ratio
loss = coef_1 * pmask * torch.log(reg_score + eps) + coef_0 * (1.0 - pmask
) * torch.log(1.0 - reg_score + eps)
loss = -torch.mean(loss)
return loss
class BinaryLogisticRegressionLossNew(nn.Module):
"""Binary Logistic Regression Loss.
It will calculate binary logistic regression loss given reg_score and
label.
"""
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
scenarios/dev
|
BinaryLogisticRegressionLoss
| false
| 4,455
|
[
"Apache-2.0"
] | 0
|
9f91ebc142cea1c31231d233571ad59460ab6fba
|
https://github.com/scenarios/dev/tree/9f91ebc142cea1c31231d233571ad59460ab6fba
|
GCN
|
import torch
from torchvision.datasets import *
import torch.nn as nn
from torchvision.transforms import *
class GCN(nn.Module):
def __init__(self, num_state, num_node, bias=False):
super(GCN, self).__init__()
self.conv1 = nn.Conv1d(num_node, num_node, kernel_size=1, padding=0,
stride=1, groups=1, bias=True)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv1d(num_state, num_state, kernel_size=1, padding
=0, stride=1, groups=1, bias=bias)
def forward(self, x):
h = self.conv1(x.permute(0, 2, 1).contiguous()).permute(0, 2, 1)
h = h + x
h = self.relu(h)
h = self.conv2(h)
return h
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'num_state': 4, 'num_node': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torchvision.datasets import *
import torch.nn as nn
from torchvision.transforms import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
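# Kernel: materializes a permute(0, 2, 1).contiguous() copy of a (4, 4, 4) tensor so the
# following Conv1d runs on a contiguous layout.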
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
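# Kernel: adds the conv1 bias and the residual input x, applies ReLU, and writes the
# result back in place in the permuted layout.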
@triton.jit
def triton_poi_fused_add_relu_1(in_out_ptr0, in_ptr0, in_ptr1, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_out_ptr0 + (x2 + 4 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + y0, ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.full([1, 1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x2 + 4 * y3), tmp6, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 4)](primals_1, buf0, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0)
del buf1
triton_poi_fused_add_relu_1[grid(16, 4)](buf2, primals_3, primals_1,
16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del primals_1
del primals_3
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_0[grid(16, 4)](buf2, buf3, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4), (16, 4, 1))
del buf3
return buf4, primals_2, primals_4, buf0, buf2
class GCNNew(nn.Module):
def __init__(self, num_state, num_node, bias=False):
super(GCNNew, self).__init__()
self.conv1 = nn.Conv1d(num_node, num_node, kernel_size=1, padding=0,
stride=1, groups=1, bias=True)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv1d(num_state, num_state, kernel_size=1, padding
=0, stride=1, groups=1, bias=bias)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
tousifulhaque/DANet
|
GCN
| false
| 4,456
|
[
"MIT"
] | 0
|
1a0c91f0e551a071b5e335b4157313780a8a1b1a
|
https://github.com/tousifulhaque/DANet/tree/1a0c91f0e551a071b5e335b4157313780a8a1b1a
|
Normalize
|
import torch
from torchvision.datasets import *
import torch.nn as nn
import torch.nn.functional as F
from torchvision.transforms import *
class Normalize(nn.Module):
"""Performs :math:`L_p` normalization of inputs over specified dimension.
Does:
.. math::
v = \\frac{v}{\\max(\\lVert v \\rVert_p, \\epsilon)}
for each subtensor v over dimension dim of input. Each subtensor is
flattened into a vector, i.e. :math:`\\lVert v \\rVert_p` is not a matrix
norm.
With default arguments normalizes over the second dimension with Euclidean
norm.
Args:
p (float): the exponent value in the norm formulation. Default: 2
dim (int): the dimension to reduce. Default: 1
"""
def __init__(self, p=2, dim=1):
super(Normalize, self).__init__()
self.p = p
self.dim = dim
def forward(self, x):
return F.normalize(x, self.p, self.dim, eps=1e-08)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torchvision.datasets import *
import torch.nn as nn
from torchvision.transforms import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
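# Kernel: per-position L2 norm over the 4 channels (dim=1), clamped at eps=1e-08,
# followed by the element-wise division of F.normalize.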
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-08
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x3, tmp15, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class NormalizeNew(nn.Module):
"""Performs :math:`L_p` normalization of inputs over specified dimension.
Does:
.. math::
v = \\frac{v}{\\max(\\lVert v \\rVert_p, \\epsilon)}
for each subtensor v over dimension dim of input. Each subtensor is
flattened into a vector, i.e. :math:`\\lVert v \\rVert_p` is not a matrix
norm.
With default arguments normalizes over the second dimension with Euclidean
norm.
Args:
p (float): the exponent value in the norm formulation. Default: 2
dim (int): the dimension to reduce. Default: 1
"""
def __init__(self, p=2, dim=1):
super(NormalizeNew, self).__init__()
self.p = p
self.dim = dim
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
tousifulhaque/DANet
|
Normalize
| false
| 4,457
|
[
"MIT"
] | 0
|
1a0c91f0e551a071b5e335b4157313780a8a1b1a
|
https://github.com/tousifulhaque/DANet/tree/1a0c91f0e551a071b5e335b4157313780a8a1b1a
|
OffsetNet
|
import torch
import torch.nn as nn
class OffsetNet(nn.Module):
"""OffsetNet in Temporal interlace module.
The OffsetNet consists of one convolution layer and two fc layers
with a relu activation, followed by a sigmoid function. Following
the convolution layer, two fc layers and a relu are applied to the output.
Then the sigmoid function, a multiplying factor and a minus 0.5 are applied
to transform the output to (-4, 4).
Args:
in_channels (int): Channel num of input features.
groups (int): Number of groups for fc layer outputs.
num_segments (int): Number of frame segments.
"""
def __init__(self, in_channels, groups, num_segments):
super().__init__()
self.sigmoid = nn.Sigmoid()
kernel_size = 3
padding = 1
self.conv = nn.Conv1d(in_channels, 1, kernel_size, padding=padding)
self.fc1 = nn.Linear(num_segments, num_segments)
self.relu = nn.ReLU()
self.fc2 = nn.Linear(num_segments, groups)
self.init_weights()
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
self.fc2.bias.data[...] = 0.5108
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
n, _, t = x.shape
x = self.conv(x)
x = x.view(n, t)
x = self.relu(self.fc1(x))
x = self.fc2(x)
x = x.view(n, 1, -1)
x = 4 * (self.sigmoid(x) - 0.5)
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'groups': 1, 'num_segments': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
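# Kernel: adds the single Conv1d bias value to the convolution output in place.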
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
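# Kernel: applies the final OffsetNet activation 4 * (sigmoid(x) - 0.5) element-wise to the fc2 output.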
@triton.jit
def triton_poi_fused_mul_sigmoid_sub_2(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 0.5
tmp3 = tmp1 - tmp2
tmp4 = 4.0
tmp5 = tmp3 * tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 4, 3), (12, 3, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,),
padding=(1,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4), (4, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16)](buf1, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (4, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_relu_1[grid(16)](buf3, primals_5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
(4, 1), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_7
buf6 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
triton_poi_fused_mul_sigmoid_sub_2[grid(4)](buf5, buf6, 4, XBLOCK=4,
num_warps=1, num_stages=1)
return buf6, primals_1, primals_2, reinterpret_tensor(buf1, (4, 4), (4,
1), 0), buf3, buf5, primals_6, primals_4
class OffsetNetNew(nn.Module):
"""OffsetNet in Temporal interlace module.
The OffsetNet consists of one convolution layer and two fc layers
with a relu activation, followed by a sigmoid function. Following
the convolution layer, two fc layers and a relu are applied to the output.
Then the sigmoid function, a multiplying factor and a minus 0.5 are applied
to transform the output to (-4, 4).
Args:
in_channels (int): Channel num of input features.
groups (int): Number of groups for fc layer outputs.
num_segments (int): Number of frame segments.
"""
def __init__(self, in_channels, groups, num_segments):
super().__init__()
self.sigmoid = nn.Sigmoid()
kernel_size = 3
padding = 1
self.conv = nn.Conv1d(in_channels, 1, kernel_size, padding=padding)
self.fc1 = nn.Linear(num_segments, num_segments)
self.relu = nn.ReLU()
self.fc2 = nn.Linear(num_segments, groups)
self.init_weights()
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
self.fc2.bias.data[...] = 0.5108
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_4 = self.fc1.weight
primals_5 = self.fc1.bias
primals_6 = self.fc2.weight
primals_7 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
scenarios/dev
|
OffsetNet
| false
| 4,458
|
[
"Apache-2.0"
] | 0
|
9f91ebc142cea1c31231d233571ad59460ab6fba
|
https://github.com/scenarios/dev/tree/9f91ebc142cea1c31231d233571ad59460ab6fba
|
TwoPartSimpleModel
|
import torch
import torch.nn as nn
import torch.utils.data
class SimpleModel(nn.Module):
def forward(self, x):
return 2 * x
def prepare_for_export(self, cfg, inputs, predictor_type):
return PredictorExportConfig(model=self, data_generator=lambda x: (x,))
class TwoPartSimpleModel(nn.Module):
"""
Suppose there is some function in the middle that can't be traced, so we
need to export the model as two parts.
"""
def __init__(self):
super().__init__()
self.part1 = SimpleModel()
self.part2 = SimpleModel()
def forward(self, x):
x = self.part1(x)
x = TwoPartSimpleModel.non_traceable_func(x)
x = self.part2(x)
return x
def prepare_for_export(self, cfg, inputs, predictor_type):
def data_generator(x):
part1_args = x,
x = self.part1(x)
x = TwoPartSimpleModel.non_traceable_func(x)
part2_args = x,
return {'part1': part1_args, 'part2': part2_args}
return PredictorExportConfig(model={'part1': self.part1, 'part2':
self.part2}, data_generator=data_generator, run_func_info=
FuncInfo.gen_func_info(TwoPartSimpleModel.RunFunc, params={}))
@staticmethod
def non_traceable_func(x):
return x + 1 if len(x.shape) > 3 else x - 1
class RunFunc(object):
def __call__(self, model, x):
assert isinstance(model, dict)
x = model['part1'](x)
x = TwoPartSimpleModel.non_traceable_func(x)
x = model['part2'](x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
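# Kernel: the whole traced forward fused into one element-wise op, (2 * x + 1) * 2,
# since the 4-d input takes the x + 1 branch of non_traceable_func.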
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp2 + tmp3
tmp5 = tmp4 * tmp1
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class SimpleModel(nn.Module):
def forward(self, x):
return 2 * x
def prepare_for_export(self, cfg, inputs, predictor_type):
return PredictorExportConfig(model=self, data_generator=lambda x: (x,))
class TwoPartSimpleModelNew(nn.Module):
"""
Suppose there is some function in the middle that can't be traced, so we
need to export the model as two parts.
"""
def __init__(self):
super().__init__()
self.part1 = SimpleModel()
self.part2 = SimpleModel()
def prepare_for_export(self, cfg, inputs, predictor_type):
def data_generator(x):
part1_args = x,
x = self.part1(x)
x = TwoPartSimpleModelNew.non_traceable_func(x)
part2_args = x,
return {'part1': part1_args, 'part2': part2_args}
return PredictorExportConfig(model={'part1': self.part1, 'part2':
self.part2}, data_generator=data_generator, run_func_info=
FuncInfo.gen_func_info(TwoPartSimpleModelNew.RunFunc, params={}))
@staticmethod
def non_traceable_func(x):
return x + 1 if len(x.shape) > 3 else x - 1
class RunFunc(object):
def __call__(self, model, x):
assert isinstance(model, dict)
x = model['part1'](x)
x = TwoPartSimpleModelNew.non_traceable_func(x)
x = model['part2'](x)
return x
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
tsubauaaa/d2go
|
TwoPartSimpleModel
| false
| 4,459
|
[
"Apache-2.0"
] | 0
|
9f746159ebf78ce79f644c405ca8695bc29d1075
|
https://github.com/tsubauaaa/d2go/tree/9f746159ebf78ce79f644c405ca8695bc29d1075
|
CPAMDec
|
from torch.nn import Module
import torch
from torchvision.datasets import *
from torch.nn import Parameter
from torch.nn import Conv2d
from torch.nn import Linear
from torch.nn import Softmax
from torchvision.transforms import *
class CPAMDec(Module):
"""
CPAM decoding module
"""
def __init__(self, in_channels):
super(CPAMDec, self).__init__()
self.softmax = Softmax(dim=-1)
self.scale = Parameter(torch.zeros(1))
self.conv_query = Conv2d(in_channels=in_channels, out_channels=
in_channels // 4, kernel_size=1)
self.conv_key = Linear(in_channels, in_channels // 4)
self.conv_value = Linear(in_channels, in_channels)
def forward(self, x, y):
"""
inputs :
x : input feature(N,C,H,W) y:gathering centers(N,K,M)
returns :
out : compact position attention feature
attention map: (H*W)*M
"""
m_batchsize, C, width, height = x.size()
m_batchsize, K, _M = y.size()
proj_query = self.conv_query(x).view(m_batchsize, -1, width * height
).permute(0, 2, 1)
proj_key = self.conv_key(y).view(m_batchsize, K, -1).permute(0, 2, 1)
energy = torch.bmm(proj_query, proj_key)
attention = self.softmax(energy)
proj_value = self.conv_value(y).permute(0, 2, 1)
out = torch.bmm(proj_value, attention.permute(0, 2, 1))
out = out.view(m_batchsize, C, width, height)
out = self.scale * out + x
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn import Module
from torchvision.datasets import *
from torch.nn import Parameter
from torch.nn import Conv2d
from torch.nn import Linear
from torch.nn import Softmax
from torchvision.transforms import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
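# Kernel: adds the convolution bias to the single-channel query projection in place.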
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
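# Kernels _softmax_1 / _softmax_2: numerically stable softmax over the last dim (size 4);
# the first subtracts the row max and exponentiates, the second normalizes by the row sum.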
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask)
tmp3 = tmp1 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (1,), (1,))
assert_size_stride(primals_5, (1, 4), (4, 1))
assert_size_stride(primals_6, (1,), (1,))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_3, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1))
buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(primals_2, (16,
4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 1), (1, 4), 0
), alpha=1, beta=1, out=buf2)
del primals_5
del primals_6
buf3 = reinterpret_tensor(buf0, (4, 1, 4, 4), (16, 1, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(64)](buf3, primals_4, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_4
buf4 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (4, 16, 1), (16, 1, 0),
0), reinterpret_tensor(buf2, (4, 1, 4), (4, 1, 1), 0), out=buf4)
buf5 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_8, reinterpret_tensor(primals_2, (16,
4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf7)
del primals_7
del primals_8
buf8 = reinterpret_tensor(buf5, (4, 4, 16), (64, 16, 1), 0)
del buf5
extern_kernels.bmm(reinterpret_tensor(buf7, (4, 4, 4), (16, 1, 4),
0), reinterpret_tensor(buf6, (4, 4, 16), (64, 1, 4), 0), out=buf8)
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_3[grid(256)](primals_9, buf8, primals_1,
buf9, 256, XBLOCK=128, num_warps=4, num_stages=1)
return buf9, primals_1, primals_3, primals_9, reinterpret_tensor(primals_2,
(16, 4), (4, 1), 0), buf6, buf8, reinterpret_tensor(buf7, (4, 4, 4),
(16, 4, 1), 0), reinterpret_tensor(buf3, (4, 1, 16), (16, 16, 1), 0
), reinterpret_tensor(buf2, (4, 4, 1), (4, 1, 1), 0)
class CPAMDecNew(Module):
"""
CPAM decoding module
"""
def __init__(self, in_channels):
super(CPAMDecNew, self).__init__()
self.softmax = Softmax(dim=-1)
self.scale = Parameter(torch.zeros(1))
self.conv_query = Conv2d(in_channels=in_channels, out_channels=
in_channels // 4, kernel_size=1)
self.conv_key = Linear(in_channels, in_channels // 4)
self.conv_value = Linear(in_channels, in_channels)
def forward(self, input_0, input_1):
# map parameters to the primals in the order call() consumes them:
# primals_4 is added as the conv_query bias, primals_6 is the conv_key bias,
# and primals_9 is the scalar scale used in the final add_mul kernel.
primals_3 = self.conv_query.weight
primals_4 = self.conv_query.bias
primals_5 = self.conv_key.weight
primals_6 = self.conv_key.bias
primals_7 = self.conv_value.weight
primals_8 = self.conv_value.bias
primals_9 = self.scale
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
tousifulhaque/DANet
|
CPAMDec
| false
| 4,460
|
[
"MIT"
] | 0
|
1a0c91f0e551a071b5e335b4157313780a8a1b1a
|
https://github.com/tousifulhaque/DANet/tree/1a0c91f0e551a071b5e335b4157313780a8a1b1a
|
SplitAndConcat
|
import torch
import torch.nn as nn
import torch.utils.data
class SplitAndConcat(nn.Module):
"""Split the data from split_dim and concatenate in concat_dim.
@param split_dim axis along which the input will be chunked
@param concat_dim axis along which the chunks will be concatenated
@param chunk number of chunks to split the input into
"""
def __init__(self, split_dim: 'int'=1, concat_dim: 'int'=0, chunk: 'int'=2
):
super(SplitAndConcat, self).__init__()
self.split_dim = split_dim
self.concat_dim = concat_dim
self.chunk = chunk
def forward(self, x):
x = torch.chunk(x, self.chunk, dim=self.split_dim)
x = torch.cat(x, dim=self.concat_dim)
return x
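# E.g. with the defaults (split_dim=1, concat_dim=0, chunk=2), a (4, 4, 4, 4) input becomes (8, 2, 4, 4).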
def extra_repr(self):
return (
f'split_dim={self.split_dim}, concat_dim={self.concat_dim}, chunk={self.chunk}'
)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
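# Kernel: writes torch.cat(torch.chunk(x, 2, dim=1), dim=0) directly; output batches 0-3
# take the first two channels, batches 4-7 take the last two.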
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 32
x0 = xindex % 32
x2 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x1), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr0 + (32 + x0 + 64 * (-4 + x1)), tmp6 & xmask,
other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((8, 2, 4, 4), (32, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class SplitAndConcatNew(nn.Module):
"""Split the data from split_dim and concatenate in concat_dim.
@param split_dim axis along which the input will be chunked
@param concat_dim axis along which the chunks will be concatenated
@param chunk number of chunks to split the input into
"""
def __init__(self, split_dim: 'int'=1, concat_dim: 'int'=0, chunk: 'int'=2
):
super(SplitAndConcatNew, self).__init__()
self.split_dim = split_dim
self.concat_dim = concat_dim
self.chunk = chunk
def extra_repr(self):
return (
f'split_dim={self.split_dim}, concat_dim={self.concat_dim}, chunk={self.chunk}'
)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
tsubauaaa/d2go
|
SplitAndConcat
| false
| 4,461
|
[
"Apache-2.0"
] | 0
|
9f746159ebf78ce79f644c405ca8695bc29d1075
|
https://github.com/tsubauaaa/d2go/tree/9f746159ebf78ce79f644c405ca8695bc29d1075
|
GELU
|
import torch
import torch.nn as nn
class GELU(nn.Module):
def forward(self, x):
return torch.sigmoid(1.702 * x) * x
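# Usage sketch: GELU()(torch.rand(2, 8)) applies the sigmoid approximation element-wise and preserves the input shape.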
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
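# Kernel: element-wise sigmoid-approximated GELU, sigmoid(1.702 * x) * x.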
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.702
tmp2 = tmp0 * tmp1
tmp3 = tl.sigmoid(tmp2)
tmp4 = tmp3 * tmp0
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0[grid(256)](arg0_1, buf0, 256, XBLOCK
=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class GELUNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
txsing/augmix
|
GELU
| false
| 4,462
|
[
"Apache-2.0"
] | 0
|
9127809d8534ccb20a654f631833153e75a277fd
|
https://github.com/txsing/augmix/tree/9127809d8534ccb20a654f631833153e75a277fd
|
InstanceNormLayer
|
import torch
from torch import nn
class InstanceNormLayer(nn.Module):
"""Implements instance normalization layer."""
def __init__(self, epsilon=1e-08):
super().__init__()
self.epsilon = epsilon
def forward(self, x):
if len(x.shape) != 4:
raise ValueError(
f'The input tensor should be with shape [batch_size, num_channels, height, width], but {x.shape} received!'
)
x = x - torch.mean(x, dim=[2, 3], keepdim=True)
x = x / torch.sqrt(torch.mean(x ** 2, dim=[2, 3], keepdim=True) +
self.epsilon)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
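# Kernel: one reduction per (batch, channel) pair over the 16 spatial positions;
# subtracts the mean and divides by sqrt(mean of squared deviations + eps).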
@triton.jit
def triton_per_fused_add_div_mean_pow_sqrt_sub_0(in_ptr0, out_ptr2, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tmp7 = tmp0 - tmp6
tmp8 = tmp7 * tmp7
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tmp13 = tmp12 / tmp5
tmp14 = 1e-08
tmp15 = tmp13 + tmp14
tmp16 = libdevice.sqrt(tmp15)
tmp17 = tmp7 / tmp16
tl.store(out_ptr2 + (r1 + 16 * x0), tmp17, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_mean_pow_sqrt_sub_0[grid(16)](arg0_1, buf2,
16, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf2,
class InstanceNormLayerNew(nn.Module):
"""Implements instance normalization layer."""
def __init__(self, epsilon=1e-08):
super().__init__()
self.epsilon = epsilon
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
tylerwilliams/InterFaceGAN
|
InstanceNormLayer
| false
| 4,463
|
[
"MIT"
] | 0
|
120babcc0dc777aa902ef0dcdeaec7c528369dbc
|
https://github.com/tylerwilliams/InterFaceGAN/tree/120babcc0dc777aa902ef0dcdeaec7c528369dbc
|
CCAMDec
|
from torch.nn import Module
import torch
from torchvision.datasets import *
from torch.nn import Parameter
from torch.nn import Softmax
from torchvision.transforms import *
class CCAMDec(Module):
"""
CCAM decoding module
"""
def __init__(self):
super(CCAMDec, self).__init__()
self.softmax = Softmax(dim=-1)
self.scale = Parameter(torch.zeros(1))
def forward(self, x, y):
"""
inputs :
x : input feature(N,C,H,W) y:gathering centers(N,K,H,W)
returns :
out : compact channel attention feature
attention map: K*C
"""
m_batchsize, C, width, height = x.size()
x_reshape = x.view(m_batchsize, C, -1)
B, K, _W, _H = y.size()
y_reshape = y.view(B, K, -1)
proj_query = x_reshape
proj_key = y_reshape.permute(0, 2, 1)
energy = torch.bmm(proj_query, proj_key)
energy_new = torch.max(energy, -1, keepdim=True)[0].expand_as(energy
) - energy
attention = self.softmax(energy_new)
proj_value = y.view(B, K, -1)
out = torch.bmm(attention, proj_value)
out = out.view(m_batchsize, C, width, height)
out = x + self.scale * out
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn import Module
from torchvision.datasets import *
from torch.nn import Parameter
from torch.nn import Softmax
from torchvision.transforms import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
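# Kernel: energy_new = rowwise max(energy) - energy over the last dim (size 4);
# the two _softmax kernels below then apply the stable softmax.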
@triton.jit
def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + x2, xmask)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp8 = tmp6 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tl.load(in_ptr2 + x0, xmask)
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 4, 16), (64,
16, 1), 0), reinterpret_tensor(primals_2, (4, 16, 4), (64, 1,
16), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_sub_0[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = buf1
del buf1
triton_poi_fused__softmax_2[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf2
buf4 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
extern_kernels.bmm(buf3, reinterpret_tensor(primals_2, (4, 4, 16),
(64, 16, 1), 0), out=buf4)
del buf3
del primals_2
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_3[grid(256)](primals_1, primals_3, buf4,
buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_3
return buf5, buf4
class CCAMDecNew(Module):
"""
CCAM decoding module
"""
def __init__(self):
super(CCAMDecNew, self).__init__()
self.softmax = Softmax(dim=-1)
self.scale = Parameter(torch.zeros(1))
def forward(self, input_0, input_1):
primals_3 = self.scale
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
|
tousifulhaque/DANet
|
CCAMDec
| false
| 4,464
|
[
"MIT"
] | 0
|
1a0c91f0e551a071b5e335b4157313780a8a1b1a
|
https://github.com/tousifulhaque/DANet/tree/1a0c91f0e551a071b5e335b4157313780a8a1b1a
|
Bandpass
|
import torch
import torch.nn as nn
class Bandpass(nn.Module):
def __init__(self, input_dim):
super().__init__()
self.mean = nn.Parameter(torch.randn(1, input_dim, dtype=torch.float32)
)
self.icov = nn.Parameter(torch.eye(input_dim, input_dim, dtype=
torch.float32) * 2)
self.a = nn.Parameter(torch.tensor([2], dtype=torch.float32))
def forward(self, x):
self.a.data = torch.clamp(self.a.data, 0.01, 100)
x = x - self.mean
xm = torch.matmul(x.unsqueeze(1), self.icov)
xm = torch.matmul(xm, x.unsqueeze(2)).squeeze(1)
xm = torch.abs(xm)
xm = torch.exp(-xm ** self.a)
return xm
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
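# Kernel: clamps the single learnable exponent a into [0.01, 100].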
@triton.jit
def triton_poi_fused_clamp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = 0.01
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = 100.0
tmp5 = triton_helpers.minimum(tmp3, tmp4)
tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp5, None)
@triton.jit
def triton_poi_fused_sub_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex // 256
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
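# Kernel: computes exp(-|xm| ** a) element-wise on the quadratic-form result.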
@triton.jit
def triton_poi_fused_abs_exp_neg_pow_4(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp1 = tl_math.abs(tmp0)
tmp4 = libdevice.pow(tmp1, tmp3)
tmp5 = -tmp4
tmp6 = tl_math.exp(tmp5)
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (1,), (1,))
assert_size_stride(primals_2, (1, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1,), (1,), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_0[grid(1)](primals_1, buf0, 1, XBLOCK=1,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_sub_1[grid(256)](primals_3, primals_2, buf1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
primals_4, out=buf2)
buf3 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_clone_2[grid(1024)](buf2, buf3, 1024, XBLOCK=128,
num_warps=4, num_stages=1)
del buf2
buf4 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_clone_3[grid(1024)](buf1, buf4, 1024, XBLOCK=128,
num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (64, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf4, (64, 4, 4), (16, 4, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_abs_exp_neg_pow_4[grid(1024)](buf5, buf0, buf6,
1024, XBLOCK=128, num_warps=4, num_stages=1)
buf7 = torch.ops.aten.set_.source_Tensor(primals_1, buf0)
assert_size_stride(buf7, (1,), (1,))
del primals_1
return buf6, buf0, buf5, buf6, reinterpret_tensor(buf3, (64, 4, 4), (16,
1, 4), 0), reinterpret_tensor(buf4, (64, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf1, (4, 64), (1, 4), 0), reinterpret_tensor(
primals_4, (4, 4), (1, 4), 0)
class BandpassNew(nn.Module):
def __init__(self, input_dim):
super().__init__()
self.mean = nn.Parameter(torch.randn(1, input_dim, dtype=torch.float32)
)
self.icov = nn.Parameter(torch.eye(input_dim, input_dim, dtype=
torch.float32) * 2)
self.a = nn.Parameter(torch.tensor([2], dtype=torch.float32))
def forward(self, input_0):
primals_2 = self.mean
primals_4 = self.icov
primals_1 = self.a
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
tsumansapkota/Input-Invex-Neural-Network
|
Bandpass
| false
| 4,465
|
[
"Apache-2.0"
] | 0
|
6a14ee12b33da1d231d231c8f9631851a7668997
|
https://github.com/tsumansapkota/Input-Invex-Neural-Network/tree/6a14ee12b33da1d231d231c8f9631851a7668997
|
DQN
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class DQN(nn.Module):
def __init__(self, obs_size, action_size, seed):
super(DQN, self).__init__()
self.fc1 = nn.Linear(obs_size, 128)
self.fc2 = nn.Linear(128, 128)
self.fc3 = nn.Linear(128, sum(action_size))
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = F.relu(self.fc2(x))
return self.fc3(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'obs_size': 4, 'action_size': [4, 4], 'seed': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
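# Kernel: adds the layer bias and applies ReLU in place, and also stores the
# (output <= 0) mask kept for the backward pass.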
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (128, 4), (4, 1))
assert_size_stride(primals_2, (128,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (128, 128), (128, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (8, 128), (128, 1))
assert_size_stride(primals_7, (8,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1,
primals_2, buf6, 8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 128), (128, 1), 0),
reinterpret_tensor(primals_4, (128, 128), (1, 128), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf3,
primals_5, buf5, 8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_6, (128, 8), (1, 128),
0), alpha=1, beta=1, out=buf4)
del primals_7
return reinterpret_tensor(buf4, (4, 4, 4, 8), (128, 32, 8, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 128), (128, 1), 0
), reinterpret_tensor(buf3, (64, 128), (128, 1), 0
), primals_6, buf5, primals_4, buf6
class DQNNew(nn.Module):
def __init__(self, obs_size, action_size, seed):
super(DQNNew, self).__init__()
self.fc1 = nn.Linear(obs_size, 128)
self.fc2 = nn.Linear(128, 128)
self.fc3 = nn.Linear(128, sum(action_size))
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
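# Hedged equivalence sketch (added for illustration; assumes a CUDA device and
# that the eager DQN class from the snippet above is in scope): DQNNew reuses
# DQN's parameter layout, so copying the state dict and comparing forward
# passes is a natural smoke test for the Triton-backed call() path.
def _check_dqn_equivalence():
    torch.manual_seed(0)
    ref = DQN(4, [4, 4], 4).cuda()
    opt = DQNNew(4, [4, 4], 4).cuda()
    opt.load_state_dict(ref.state_dict())
    x = torch.rand([4, 4, 4, 4], device='cuda')
    return torch.allclose(ref(x), opt(x), atol=1e-5)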
|
ulyssesdotcodes/ReaL-Crowds
|
DQN
| false
| 4,466
|
[
"BSD-3-Clause"
] | 0
|
9da01fe4d1858c3c26d6387e34f4e76db5385d51
|
https://github.com/ulyssesdotcodes/ReaL-Crowds/tree/9da01fe4d1858c3c26d6387e34f4e76db5385d51
|
TSA_Fusion
|
import torch
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F
class TSA_Fusion(nn.Module):
""" Temporal Spatial Attention fusion module
Temporal: correlation;
Spatial: 3 pyramid levels.
"""
def __init__(self, nf=64, nframes=5, center=2):
super(TSA_Fusion, self).__init__()
self.center = center
self.tAtt_1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.tAtt_2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.fea_fusion = nn.Conv2d(nframes * nf, nf, 1, 1, bias=True)
self.sAtt_1 = nn.Conv2d(nframes * nf, nf, 1, 1, bias=True)
self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
self.avgpool = nn.AvgPool2d(3, stride=2, padding=1)
self.sAtt_2 = nn.Conv2d(nf * 2, nf, 1, 1, bias=True)
self.sAtt_3 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.sAtt_4 = nn.Conv2d(nf, nf, 1, 1, bias=True)
self.sAtt_5 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.sAtt_L1 = nn.Conv2d(nf, nf, 1, 1, bias=True)
self.sAtt_L2 = nn.Conv2d(nf * 2, nf, 3, 1, 1, bias=True)
self.sAtt_L3 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.sAtt_add_1 = nn.Conv2d(nf, nf, 1, 1, bias=True)
self.sAtt_add_2 = nn.Conv2d(nf, nf, 1, 1, bias=True)
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
def forward(self, aligned_fea):
B, N, C, H, W = aligned_fea.size()
emb_ref = self.tAtt_2(aligned_fea[:, self.center, :, :, :].clone())
emb = self.tAtt_1(aligned_fea.view(-1, C, H, W)).view(B, N, -1, H, W)
cor_l = []
for i in range(N):
emb_nbr = emb[:, i, :, :, :]
cor_tmp = torch.sum(emb_nbr * emb_ref, 1).unsqueeze(1)
cor_l.append(cor_tmp)
cor_prob = torch.sigmoid(torch.cat(cor_l, dim=1))
cor_prob = cor_prob.unsqueeze(2).repeat(1, 1, C, 1, 1).view(B, -1, H, W
)
aligned_fea = aligned_fea.view(B, -1, H, W) * cor_prob
fea = self.lrelu(self.fea_fusion(aligned_fea))
att = self.lrelu(self.sAtt_1(aligned_fea))
att_max = self.maxpool(att)
att_avg = self.avgpool(att)
att = self.lrelu(self.sAtt_2(torch.cat([att_max, att_avg], dim=1)))
att_L = self.lrelu(self.sAtt_L1(att))
att_max = self.maxpool(att_L)
att_avg = self.avgpool(att_L)
att_L = self.lrelu(self.sAtt_L2(torch.cat([att_max, att_avg], dim=1)))
att_L = self.lrelu(self.sAtt_L3(att_L))
att_L = F.interpolate(att_L, scale_factor=2, mode='bilinear',
align_corners=False)
att = self.lrelu(self.sAtt_3(att))
att = att + att_L
att = self.lrelu(self.sAtt_4(att))
att = F.interpolate(att, scale_factor=2, mode='bilinear',
align_corners=False)
att = self.sAtt_5(att)
att_add = self.sAtt_add_2(self.lrelu(self.sAtt_add_1(att)))
att = torch.sigmoid(att)
fea = fea * att * 2 + att_add
return fea
def get_inputs():
return [torch.rand([4, 5, 64, 4, 4])]
def get_init_inputs():
return [[], {}]
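# Hedged usage sketch (added for illustration, not part of the original repo):
# with the get_inputs() shape above, the aligned features are
# (B=4, N=5, C=64, H=4, W=4) and the fused output keeps the single-frame
# layout (B, nf, H, W).
def _tsa_fusion_example():
    fusion = TSA_Fusion(nf=64, nframes=5, center=2)
    fused = fusion(torch.rand([4, 5, 64, 4, 4]))
    assert fused.shape == (4, 64, 4, 4)
    return fused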
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 1024
x1 = xindex // 1024
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2048 + x0 + 5120 * x1), None)
tl.store(out_ptr0 + x2, tmp0, None)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_per_fused_cat_mul_sum_3(in_ptr0, in_ptr1, out_ptr5, out_ptr6,
out_ptr7, out_ptr8, out_ptr9, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 16
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 5120 * x1), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (x0 + 16 * r2 + 1024 * x1), xmask, other=0.0)
tmp7 = tl.load(in_ptr0 + (1024 + x0 + 16 * r2 + 5120 * x1), xmask,
other=0.0)
tmp13 = tl.load(in_ptr0 + (2048 + x0 + 16 * r2 + 5120 * x1), xmask,
other=0.0)
tmp19 = tl.load(in_ptr0 + (3072 + x0 + 16 * r2 + 5120 * x1), xmask,
other=0.0)
tmp25 = tl.load(in_ptr0 + (4096 + x0 + 16 * r2 + 5120 * x1), xmask,
other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp8 = tmp7 * tmp1
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tmp14 = tmp13 * tmp1
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp20 = tmp19 * tmp1
tmp21 = tl.broadcast_to(tmp20, [XBLOCK, RBLOCK])
tmp23 = tl.where(xmask, tmp21, 0)
tmp24 = tl.sum(tmp23, 1)[:, None]
tmp26 = tmp25 * tmp1
tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
tmp29 = tl.where(xmask, tmp27, 0)
tmp30 = tl.sum(tmp29, 1)[:, None]
tl.store(out_ptr5 + (x0 + 80 * x1), tmp6, xmask)
tl.store(out_ptr6 + (x0 + 80 * x1), tmp12, xmask)
tl.store(out_ptr7 + (x0 + 80 * x1), tmp18, xmask)
tl.store(out_ptr8 + (x0 + 80 * x1), tmp24, xmask)
tl.store(out_ptr9 + (x0 + 80 * x1), tmp30, xmask)
@triton.jit
def triton_poi_fused_mul_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 16
x1 = xindex // 16 % 320
x2 = xindex // 5120
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (x0 + 16 * (x1 // 64) + 80 * x2), None)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x3, tmp3, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_5(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_avg_pool2d_max_pool2d_with_indices_6(in_ptr0, out_ptr0,
out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 2 % 2
x0 = xindex % 2
x5 = xindex // 2
x3 = xindex // 256
x6 = xindex % 256
x7 = xindex
tmp0 = -1 + 2 * x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + 2 * x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x5), tmp10 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp12 = 2 * x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x5), tmp16 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + 2 * x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-3 + 2 * x0 + 8 * x5), tmp23 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2 * x1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x5), tmp30 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (2 * x0 + 8 * x5), tmp33 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x5), tmp36 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + 2 * x1
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3 + 2 * x0 + 8 * x5), tmp43 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x5), tmp46 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x5), tmp49 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tmp77 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x5), tmp10 & xmask,
eviction_policy='evict_last', other=0.0)
tmp78 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x5), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp79 = tmp78 + tmp77
tmp80 = tl.load(in_ptr0 + (-3 + 2 * x0 + 8 * x5), tmp23 & xmask,
eviction_policy='evict_last', other=0.0)
tmp81 = tmp80 + tmp79
tmp82 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x5), tmp30 & xmask,
eviction_policy='evict_last', other=0.0)
tmp83 = tmp82 + tmp81
tmp84 = tl.load(in_ptr0 + (2 * x0 + 8 * x5), tmp33 & xmask,
eviction_policy='evict_last', other=0.0)
tmp85 = tmp84 + tmp83
tmp86 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x5), tmp36 & xmask,
eviction_policy='evict_last', other=0.0)
tmp87 = tmp86 + tmp85
tmp88 = tl.load(in_ptr0 + (3 + 2 * x0 + 8 * x5), tmp43 & xmask,
eviction_policy='evict_last', other=0.0)
tmp89 = tmp88 + tmp87
tmp90 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x5), tmp46 & xmask,
eviction_policy='evict_last', other=0.0)
tmp91 = tmp90 + tmp89
tmp92 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x5), tmp49 & xmask,
eviction_policy='evict_last', other=0.0)
tmp93 = tmp92 + tmp91
tmp94 = 1 + -2 * x0 + -2 * x1 + (5 * (5 <= 2 + 2 * x0) + (2 + 2 * x0) *
(2 + 2 * x0 < 5)) * (5 * (5 <= 2 + 2 * x1) + (2 + 2 * x1) * (2 + 2 *
x1 < 5)) + -2 * x0 * (5 * (5 <= 2 + 2 * x1) + (2 + 2 * x1) * (2 + 2 *
x1 < 5)) + -2 * x1 * (5 * (5 <= 2 + 2 * x0) + (2 + 2 * x0) * (2 + 2 *
x0 < 5)) + 4 * x0 * x1 + (5 * (5 <= 2 + 2 * x0) + (2 + 2 * x0) * (2 +
2 * x0 < 5)) + (5 * (5 <= 2 + 2 * x1) + (2 + 2 * x1) * (2 + 2 * x1 < 5)
)
tmp95 = tmp93 / tmp94
tl.store(out_ptr0 + (x6 + 512 * x3), tmp51, xmask)
tl.store(out_ptr1 + x7, tmp76, xmask)
tl.store(out_ptr2 + (x6 + 512 * x3), tmp95, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_7(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 64
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_avg_pool2d_max_pool2d_with_indices_8(in_ptr0, out_ptr0,
out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
x1 = xindex // 64
tmp0 = tl.full([1], -1, tl.int64)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tmp5 & tmp5
tmp7 = tl.load(in_ptr0 + (-3 + 4 * x2), tmp6 & xmask, eviction_policy=
'evict_last', other=float('-inf'))
tmp8 = tmp1 >= tmp1
tmp9 = tmp1 < tmp3
tmp10 = tmp8 & tmp9
tmp11 = tmp5 & tmp10
tmp12 = tl.load(in_ptr0 + (-2 + 4 * x2), tmp11 & xmask, eviction_policy
='evict_last', other=float('-inf'))
tmp13 = triton_helpers.maximum(tmp12, tmp7)
tmp14 = tl.full([1], 1, tl.int64)
tmp15 = tmp14 >= tmp1
tmp16 = tmp14 < tmp3
tmp17 = tmp15 & tmp16
tmp18 = tmp5 & tmp17
tmp19 = tl.load(in_ptr0 + (-1 + 4 * x2), tmp18 & xmask, eviction_policy
='evict_last', other=float('-inf'))
tmp20 = triton_helpers.maximum(tmp19, tmp13)
tmp21 = tmp10 & tmp5
tmp22 = tl.load(in_ptr0 + (-1 + 4 * x2), tmp21 & xmask, eviction_policy
='evict_last', other=float('-inf'))
tmp23 = triton_helpers.maximum(tmp22, tmp20)
tmp24 = tmp10 & tmp10
tmp25 = tl.load(in_ptr0 + 4 * x2, tmp24 & xmask, eviction_policy=
'evict_last', other=float('-inf'))
tmp26 = triton_helpers.maximum(tmp25, tmp23)
tmp27 = tmp10 & tmp17
tmp28 = tl.load(in_ptr0 + (1 + 4 * x2), tmp27 & xmask, eviction_policy=
'evict_last', other=float('-inf'))
tmp29 = triton_helpers.maximum(tmp28, tmp26)
tmp30 = tmp17 & tmp5
tmp31 = tl.load(in_ptr0 + (1 + 4 * x2), tmp30 & xmask, eviction_policy=
'evict_last', other=float('-inf'))
tmp32 = triton_helpers.maximum(tmp31, tmp29)
tmp33 = tmp17 & tmp10
tmp34 = tl.load(in_ptr0 + (2 + 4 * x2), tmp33 & xmask, eviction_policy=
'evict_last', other=float('-inf'))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp17 & tmp17
tmp37 = tl.load(in_ptr0 + (3 + 4 * x2), tmp36 & xmask, eviction_policy=
'evict_last', other=float('-inf'))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = tmp12 > tmp7
tmp40 = tl.full([1], 1, tl.int8)
tmp41 = tl.full([1], 0, tl.int8)
tmp42 = tl.where(tmp39, tmp40, tmp41)
tmp43 = tmp19 > tmp13
tmp44 = tl.full([1], 2, tl.int8)
tmp45 = tl.where(tmp43, tmp44, tmp42)
tmp46 = tmp22 > tmp20
tmp47 = tl.full([1], 3, tl.int8)
tmp48 = tl.where(tmp46, tmp47, tmp45)
tmp49 = tmp25 > tmp23
tmp50 = tl.full([1], 4, tl.int8)
tmp51 = tl.where(tmp49, tmp50, tmp48)
tmp52 = tmp28 > tmp26
tmp53 = tl.full([1], 5, tl.int8)
tmp54 = tl.where(tmp52, tmp53, tmp51)
tmp55 = tmp31 > tmp29
tmp56 = tl.full([1], 6, tl.int8)
tmp57 = tl.where(tmp55, tmp56, tmp54)
tmp58 = tmp34 > tmp32
tmp59 = tl.full([1], 7, tl.int8)
tmp60 = tl.where(tmp58, tmp59, tmp57)
tmp61 = tmp37 > tmp35
tmp62 = tl.full([1], 8, tl.int8)
tmp63 = tl.where(tmp61, tmp62, tmp60)
tmp64 = tl.load(in_ptr0 + (-3 + 4 * x2), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp65 = tl.load(in_ptr0 + (-2 + 4 * x2), tmp11 & xmask, eviction_policy
='evict_last', other=0.0)
tmp66 = tmp65 + tmp64
tmp67 = tl.load(in_ptr0 + (-1 + 4 * x2), tmp18 & xmask, eviction_policy
='evict_last', other=0.0)
tmp68 = tmp67 + tmp66
tmp69 = tl.load(in_ptr0 + (-1 + 4 * x2), tmp21 & xmask, eviction_policy
='evict_last', other=0.0)
tmp70 = tmp69 + tmp68
tmp71 = tl.load(in_ptr0 + 4 * x2, tmp24 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp72 = tmp71 + tmp70
tmp73 = tl.load(in_ptr0 + (1 + 4 * x2), tmp27 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp74 = tmp73 + tmp72
tmp75 = tl.load(in_ptr0 + (1 + 4 * x2), tmp30 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp76 = tmp75 + tmp74
tmp77 = tl.load(in_ptr0 + (2 + 4 * x2), tmp33 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp78 = tmp77 + tmp76
tmp79 = tl.load(in_ptr0 + (3 + 4 * x2), tmp36 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp80 = tmp79 + tmp78
tmp81 = tl.full([1], 9, tl.int32)
tmp82 = tmp80 / tmp81
tl.store(out_ptr0 + (x0 + 128 * x1), tmp38, xmask)
tl.store(out_ptr1 + x2, tmp63, xmask)
tl.store(out_ptr2 + (x0 + 128 * x1), tmp82, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_9(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused__to_copy_10(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_clamp_11(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 0, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + x0, tmp12, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_12(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_leaky_relu_backward_mul_sub_13(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
in_ptr6, in_ptr7, in_ptr8, in_ptr9, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 2 % 2
x0 = xindex % 2
x5 = xindex // 4
x2 = xindex // 4 % 64
x6 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + x6, xmask)
tmp26 = tl.load(in_ptr7 + x2, xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr8 + x1, xmask, eviction_policy='evict_last')
tmp36 = tl.load(in_ptr9 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 1, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tl.where(tmp7, tmp6, tmp5)
tmp11 = tmp9 + tmp10
tmp12 = 0.0
tmp13 = tmp11 > tmp12
tmp14 = 0.1
tmp15 = tmp11 * tmp14
tmp16 = tl.where(tmp13, tmp11, tmp15)
tmp18 = tmp17 + tmp1
tmp19 = tmp17 < 0
tl.where(tmp19, tmp18, tmp17)
tmp21 = tmp16 - tmp16
tmp23 = tmp21 * tmp22
tmp24 = tmp16 + tmp23
tmp27 = tmp25 + tmp26
tmp28 = tmp27 > tmp12
tmp29 = tmp27 * tmp14
tmp30 = tl.where(tmp28, tmp27, tmp29)
tmp32 = tmp31 + tmp1
tmp33 = tmp31 < 0
tl.where(tmp33, tmp32, tmp31)
tmp35 = tmp24 - tmp24
tmp37 = tmp35 * tmp36
tmp38 = tmp24 + tmp37
tmp39 = tmp30 + tmp38
tmp40 = tmp30 > tmp12
tl.store(in_out_ptr0 + x6, tmp39, xmask)
tl.store(out_ptr0 + x6, tmp40, xmask)
@triton.jit
def triton_poi_fused__to_copy_14(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_clamp_15(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = triton_helpers.minimum(tmp10, tmp9)
tl.store(out_ptr0 + x0, tmp11, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_16(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_17(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
in_ptr6, in_ptr7, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4 % 4
x0 = xindex % 4
x6 = xindex // 16
x2 = xindex // 16 % 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last')
tmp48 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 2, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 2 * tmp4 + 4 * x6), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = 0.0
tmp13 = tmp11 > tmp12
tmp14 = 0.1
tmp15 = tmp11 * tmp14
tmp16 = tl.where(tmp13, tmp11, tmp15)
tmp18 = tmp17 + tmp1
tmp19 = tmp17 < 0
tmp20 = tl.where(tmp19, tmp18, tmp17)
tmp21 = tl.load(in_ptr2 + (tmp20 + 2 * tmp4 + 4 * x6), None,
eviction_policy='evict_last')
tmp22 = tmp21 + tmp10
tmp23 = tmp22 > tmp12
tmp24 = tmp22 * tmp14
tmp25 = tl.where(tmp23, tmp22, tmp24)
tmp26 = tmp25 - tmp16
tmp28 = tmp26 * tmp27
tmp29 = tmp16 + tmp28
tmp31 = tmp30 + tmp1
tmp32 = tmp30 < 0
tmp33 = tl.where(tmp32, tmp31, tmp30)
tmp34 = tl.load(in_ptr2 + (tmp8 + 2 * tmp33 + 4 * x6), None,
eviction_policy='evict_last')
tmp35 = tmp34 + tmp10
tmp36 = tmp35 > tmp12
tmp37 = tmp35 * tmp14
tmp38 = tl.where(tmp36, tmp35, tmp37)
tmp39 = tl.load(in_ptr2 + (tmp20 + 2 * tmp33 + 4 * x6), None,
eviction_policy='evict_last')
tmp40 = tmp39 + tmp10
tmp41 = tmp40 > tmp12
tmp42 = tmp40 * tmp14
tmp43 = tl.where(tmp41, tmp40, tmp42)
tmp44 = tmp43 - tmp38
tmp45 = tmp44 * tmp27
tmp46 = tmp38 + tmp45
tmp47 = tmp46 - tmp29
tmp49 = tmp47 * tmp48
tmp50 = tmp29 + tmp49
tl.store(in_out_ptr0 + x4, tmp50, None)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_mul_sigmoid_18(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + x3, None)
tmp13 = tl.load(in_out_ptr1 + x3, None)
tmp14 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp9 = tl.sigmoid(tmp8)
tmp10 = tmp7 * tmp9
tmp11 = 2.0
tmp12 = tmp10 * tmp11
tmp15 = tmp13 + tmp14
tmp16 = tmp12 + tmp15
tl.store(in_out_ptr0 + x3, tmp2, None)
tl.store(in_out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_19(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_20(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27) = args
args.clear()
assert_size_stride(primals_1, (4, 5, 64, 4, 4), (5120, 1024, 16, 4, 1))
assert_size_stride(primals_2, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_3, (64,), (1,))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (64, 320, 1, 1), (320, 1, 1, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (64, 320, 1, 1), (320, 1, 1, 1))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (64, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_11, (64,), (1,))
assert_size_stride(primals_12, (64, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_13, (64,), (1,))
assert_size_stride(primals_14, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_15, (64,), (1,))
assert_size_stride(primals_16, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_17, (64,), (1,))
assert_size_stride(primals_18, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_19, (64,), (1,))
assert_size_stride(primals_20, (64, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_21, (64,), (1,))
assert_size_stride(primals_22, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_23, (64,), (1,))
assert_size_stride(primals_24, (64, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_25, (64,), (1,))
assert_size_stride(primals_26, (64, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_27, (64,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(4096)](primals_1, buf0, 4096, XBLOCK=
128, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 64, 4, 4), (1024, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(4096)](buf2, primals_3, 4096,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf3 = extern_kernels.convolution(reinterpret_tensor(primals_1, (20,
64, 4, 4), (1024, 16, 4, 1), 0), primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (20, 64, 4, 4), (1024, 16, 4, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_2[grid(20480)](buf4, primals_5, 20480,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf15 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32)
buf10 = reinterpret_tensor(buf15, (4, 1, 4, 4), (80, 16, 4, 1), 0)
buf11 = reinterpret_tensor(buf15, (4, 1, 4, 4), (80, 16, 4, 1), 16)
buf12 = reinterpret_tensor(buf15, (4, 1, 4, 4), (80, 16, 4, 1), 32)
buf13 = reinterpret_tensor(buf15, (4, 1, 4, 4), (80, 16, 4, 1), 48)
buf14 = reinterpret_tensor(buf15, (4, 1, 4, 4), (80, 16, 4, 1), 64)
triton_per_fused_cat_mul_sum_3[grid(64)](buf4, buf2, buf10, buf11,
buf12, buf13, buf14, 64, 64, XBLOCK=32, num_warps=8, num_stages=1)
buf16 = empty_strided_cuda((4, 320, 4, 4), (5120, 16, 4, 1), torch.
float32)
triton_poi_fused_mul_4[grid(20480)](primals_1, buf15, buf16, 20480,
XBLOCK=256, num_warps=4, num_stages=1)
buf17 = extern_kernels.convolution(buf16, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 64, 4, 4), (1024, 16, 4, 1))
buf19 = extern_kernels.convolution(buf16, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 64, 4, 4), (1024, 16, 4, 1))
buf20 = buf19
del buf19
triton_poi_fused_convolution_leaky_relu_5[grid(4096)](buf20,
primals_9, 4096, XBLOCK=128, num_warps=4, num_stages=1)
del primals_9
buf24 = empty_strided_cuda((4, 128, 2, 2), (512, 4, 2, 1), torch.
float32)
buf21 = reinterpret_tensor(buf24, (4, 64, 2, 2), (512, 4, 2, 1), 0)
buf22 = empty_strided_cuda((4, 64, 2, 2), (256, 4, 2, 1), torch.int8)
buf23 = reinterpret_tensor(buf24, (4, 64, 2, 2), (512, 4, 2, 1), 256)
triton_poi_fused_avg_pool2d_max_pool2d_with_indices_6[grid(1024)](buf20
, buf21, buf22, buf23, 1024, XBLOCK=128, num_warps=4, num_stages=1)
buf25 = extern_kernels.convolution(buf24, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 64, 2, 2), (256, 4, 2, 1))
buf26 = buf25
del buf25
triton_poi_fused_convolution_leaky_relu_7[grid(1024)](buf26,
primals_11, 1024, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf27 = extern_kernels.convolution(buf26, primals_12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf27, (4, 64, 2, 2), (256, 4, 2, 1))
buf28 = buf27
del buf27
triton_poi_fused_convolution_leaky_relu_7[grid(1024)](buf28,
primals_13, 1024, XBLOCK=256, num_warps=4, num_stages=1)
del primals_13
buf32 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 1, 1), torch.
float32)
buf29 = reinterpret_tensor(buf32, (4, 64, 1, 1), (128, 1, 1, 1), 0)
buf30 = empty_strided_cuda((4, 64, 1, 1), (64, 1, 1, 1), torch.int8)
buf31 = reinterpret_tensor(buf32, (4, 64, 1, 1), (128, 1, 1, 1), 64)
triton_poi_fused_avg_pool2d_max_pool2d_with_indices_8[grid(256)](buf28,
buf29, buf30, buf31, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf33 = extern_kernels.convolution(buf32, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf33, (4, 64, 1, 1), (64, 1, 1, 1))
buf34 = buf33
del buf33
triton_poi_fused_convolution_leaky_relu_9[grid(256)](buf34,
primals_15, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_15
buf35 = extern_kernels.convolution(buf34, primals_16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf35, (4, 64, 1, 1), (64, 1, 1, 1))
buf36 = empty_strided_cuda((2, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_10[grid(2)](buf36, 2, XBLOCK=2, num_warps
=1, num_stages=1)
buf37 = empty_strided_cuda((2, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_11[grid(2)](buf37, 2, XBLOCK=2,
num_warps=1, num_stages=1)
buf38 = empty_strided_cuda((2,), (1,), torch.int64)
triton_poi_fused__to_copy_10[grid(2)](buf38, 2, XBLOCK=2, num_warps
=1, num_stages=1)
buf39 = empty_strided_cuda((2,), (1,), torch.int64)
triton_poi_fused_add_clamp_11[grid(2)](buf39, 2, XBLOCK=2,
num_warps=1, num_stages=1)
buf40 = empty_strided_cuda((2,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_12[grid(2)](buf40,
2, XBLOCK=2, num_warps=1, num_stages=1)
buf42 = empty_strided_cuda((2, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_12[grid(2)](buf42,
2, XBLOCK=2, num_warps=1, num_stages=1)
buf43 = extern_kernels.convolution(buf26, primals_18, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf43, (4, 64, 2, 2), (256, 4, 2, 1))
buf41 = empty_strided_cuda((4, 64, 2, 2), (256, 4, 2, 1), torch.float32
)
buf44 = buf41
del buf41
buf62 = empty_strided_cuda((4, 64, 2, 2), (256, 4, 2, 1), torch.bool)
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_leaky_relu_backward_mul_sub_13[
grid(1024)](buf44, buf36, buf38, buf35, primals_17, buf39,
buf40, buf43, primals_19, buf37, buf42, buf62, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
del buf43
del primals_19
buf45 = extern_kernels.convolution(buf44, primals_20, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf45, (4, 64, 2, 2), (256, 4, 2, 1))
buf46 = empty_strided_cuda((4, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_14[grid(4)](buf46, 4, XBLOCK=4, num_warps
=1, num_stages=1)
buf47 = empty_strided_cuda((4, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_15[grid(4)](buf47, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf48 = empty_strided_cuda((4,), (1,), torch.int64)
triton_poi_fused__to_copy_14[grid(4)](buf48, 4, XBLOCK=4, num_warps
=1, num_stages=1)
buf49 = empty_strided_cuda((4,), (1,), torch.int64)
triton_poi_fused_add_clamp_15[grid(4)](buf49, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf50 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_16[grid(4)](buf50,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf52 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_16[grid(4)](buf52,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf53 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.
float32)
buf54 = buf53
del buf53
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_17[
grid(4096)](buf54, buf46, buf48, buf45, primals_21, buf49,
buf50, buf47, buf52, 4096, XBLOCK=128, num_warps=4, num_stages=1)
buf55 = extern_kernels.convolution(buf54, primals_22, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf55, (4, 64, 4, 4), (1024, 16, 4, 1))
buf56 = buf55
del buf55
triton_poi_fused_convolution_1[grid(4096)](buf56, primals_23, 4096,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_23
buf57 = extern_kernels.convolution(buf56, primals_24, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf57, (4, 64, 4, 4), (1024, 16, 4, 1))
buf58 = buf57
del buf57
triton_poi_fused_convolution_leaky_relu_5[grid(4096)](buf58,
primals_25, 4096, XBLOCK=128, num_warps=4, num_stages=1)
del primals_25
buf59 = extern_kernels.convolution(buf58, primals_26, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf59, (4, 64, 4, 4), (1024, 16, 4, 1))
buf18 = buf17
del buf17
buf60 = buf59
del buf59
triton_poi_fused_add_convolution_leaky_relu_mul_sigmoid_18[grid(4096)](
buf18, buf60, primals_7, buf56, primals_27, 4096, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_27
del primals_7
buf61 = empty_strided_cuda((4, 64, 2, 2), (256, 4, 2, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_19[grid
(1024)](buf45, primals_21, buf61, 1024, XBLOCK=256, num_warps=4,
num_stages=1)
del buf45
del primals_21
buf63 = empty_strided_cuda((4, 64, 1, 1), (64, 1, 1, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_20[grid
(256)](buf35, primals_17, buf63, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf35
del primals_17
return (buf60, primals_1, primals_2, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, primals_16, primals_18,
primals_20, primals_22, primals_24, primals_26, buf0, buf2,
reinterpret_tensor(buf4, (4, 64, 4, 4), (5120, 16, 4, 1), 0),
reinterpret_tensor(buf4, (4, 64, 4, 4), (5120, 16, 4, 1), 1024),
reinterpret_tensor(buf4, (4, 64, 4, 4), (5120, 16, 4, 1), 2048),
reinterpret_tensor(buf4, (4, 64, 4, 4), (5120, 16, 4, 1), 3072),
reinterpret_tensor(buf4, (4, 64, 4, 4), (5120, 16, 4, 1), 4096),
buf15, buf16, buf18, buf20, buf22, buf24, buf26, buf28, buf30,
buf32, buf34, buf36, buf37, buf38, buf39, buf40, buf42, buf44,
buf46, buf47, buf48, buf49, buf50, buf52, buf54, buf56, buf58,
buf61, buf62, buf63)
class TSA_FusionNew(nn.Module):
""" Temporal Spatial Attention fusion module
Temporal: correlation;
Spatial: 3 pyramid levels.
"""
def __init__(self, nf=64, nframes=5, center=2):
super(TSA_FusionNew, self).__init__()
self.center = center
self.tAtt_1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.tAtt_2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.fea_fusion = nn.Conv2d(nframes * nf, nf, 1, 1, bias=True)
self.sAtt_1 = nn.Conv2d(nframes * nf, nf, 1, 1, bias=True)
self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
self.avgpool = nn.AvgPool2d(3, stride=2, padding=1)
self.sAtt_2 = nn.Conv2d(nf * 2, nf, 1, 1, bias=True)
self.sAtt_3 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.sAtt_4 = nn.Conv2d(nf, nf, 1, 1, bias=True)
self.sAtt_5 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.sAtt_L1 = nn.Conv2d(nf, nf, 1, 1, bias=True)
self.sAtt_L2 = nn.Conv2d(nf * 2, nf, 3, 1, 1, bias=True)
self.sAtt_L3 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.sAtt_add_1 = nn.Conv2d(nf, nf, 1, 1, bias=True)
self.sAtt_add_2 = nn.Conv2d(nf, nf, 1, 1, bias=True)
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
def forward(self, input_0):
primals_2 = self.tAtt_1.weight
primals_3 = self.tAtt_1.bias
primals_4 = self.tAtt_2.weight
primals_5 = self.tAtt_2.bias
primals_6 = self.fea_fusion.weight
primals_7 = self.fea_fusion.bias
primals_8 = self.sAtt_1.weight
primals_9 = self.sAtt_1.bias
primals_10 = self.sAtt_2.weight
primals_11 = self.sAtt_2.bias
primals_16 = self.sAtt_3.weight
primals_13 = self.sAtt_3.bias
primals_12 = self.sAtt_4.weight
primals_15 = self.sAtt_4.bias
primals_18 = self.sAtt_5.weight
primals_17 = self.sAtt_5.bias
primals_20 = self.sAtt_L1.weight
primals_19 = self.sAtt_L1.bias
primals_14 = self.sAtt_L2.weight
primals_21 = self.sAtt_L2.bias
primals_22 = self.sAtt_L3.weight
primals_23 = self.sAtt_L3.bias
primals_24 = self.sAtt_add_1.weight
primals_25 = self.sAtt_add_1.bias
primals_26 = self.sAtt_add_2.weight
primals_27 = self.sAtt_add_2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27])
return output[0]
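# Hedged smoke test (added for illustration; assumes a CUDA device and that the
# eager TSA_Fusion class from the snippet above is in scope): TSA_FusionNew
# mirrors TSA_Fusion's submodule names, so a state-dict copy plus a loose
# tolerance is one way to sanity-check the fused kernels; exact bitwise
# agreement is not guaranteed.
def _check_tsa_fusion(tol=1e-4):
    torch.manual_seed(0)
    ref = TSA_Fusion().cuda()
    opt = TSA_FusionNew().cuda()
    opt.load_state_dict(ref.state_dict())
    x = torch.rand([4, 5, 64, 4, 4], device='cuda')
    return (ref(x) - opt(x)).abs().max().item() <= tol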
|
sutkarsh/EDVR
|
TSA_Fusion
| false
| 4,467
|
[
"Apache-2.0"
] | 0
|
cd9f2d46edbb00333d8ffb31aebc52cfbda4b6e3
|
https://github.com/sutkarsh/EDVR/tree/cd9f2d46edbb00333d8ffb31aebc52cfbda4b6e3
|
LenCompLoss
|
import torch
import torch.utils.data
import torch
import torch.nn as nn
class LenCompLoss(nn.Module):
def __init__(self):
super(LenCompLoss, self).__init__()
self.loss = nn.L1Loss()
def forward(self, x, y):
loss = self.loss(torch.sum(x), torch.sum(y))
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
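# Hedged note (added for illustration; helper name is hypothetical): with
# scalar arguments, L1Loss reduces to an absolute difference, so this loss is
# simply |sum(x) - sum(y)| -- the quantity the fused Triton reduction below
# computes in a single pass.
def _len_comp_reference(x, y):
    return (x.sum() - y.sum()).abs()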
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_mean_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp4 = tl.load(in_ptr1 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0))
tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = tmp3 - tmp7
tmp9 = tl_math.abs(tmp8)
tmp10 = 1.0
tmp11 = tmp9 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp11, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_mean_sub_sum_0[grid(1)](buf2, arg0_1, arg1_1,
1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf2,
class LenCompLossNew(nn.Module):
def __init__(self):
super(LenCompLossNew, self).__init__()
self.loss = nn.L1Loss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
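# Hedged smoke test (added for illustration; assumes a CUDA device and that the
# eager LenCompLoss and get_inputs from the snippet above are in scope): the
# fused reduction should agree with the eager loss up to reduction-order
# rounding.
def _check_len_comp():
    x, y = [t.cuda() for t in get_inputs()]
    return torch.allclose(LenCompLoss()(x, y), LenCompLossNew()(x, y),
                          atol=1e-5)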
|
usmanwardag/pytorch-CycleGAN-and-pix2pix
|
LenCompLoss
| false
| 4,468
|
[
"BSD-3-Clause"
] | 0
|
72f2050600e7821476c9e19fcf8f1973f6a6f78c
|
https://github.com/usmanwardag/pytorch-CycleGAN-and-pix2pix/tree/72f2050600e7821476c9e19fcf8f1973f6a6f78c
|
FluidGravityForce
|
import torch
import torch.nn as nn
class FluidGravityForce(nn.Module):
def __init__(self, gravity, maxSpeed=3):
"""
Initializes a fluid gravity model.
Arguments:
            gravity: Gravity vector in the global frame (the same frame as the particle locations) for the simulation
maxSpeed: The maximum magnitude of the particle velocities. Higher velocities are clamped.
Previous work used: MAX_VEL = 0.5*0.1*NSUBSTEPS/DT
"""
super(FluidGravityForce, self).__init__()
self.gravity = gravity
self.maxSpeed = maxSpeed
self.relu = nn.ReLU()
def _cap_magnitude(self, A, cap):
d = len(A.size())
vv = torch.norm(A, 2, d - 1, keepdim=True)
vv = cap / (vv + 0.0001)
vv = -(self.relu(-vv + 1.0) - 1.0)
return A * vv
def forward(self, locs, vel, dt):
"""
Applies gravity force to fluid sim
Inputs:
locs: A BxNx3 tensor where B is the batch size, N is the number of particles.
The tensor contains the locations of every particle.
            vel: A BxNx3 tensor that contains the velocity of every particle
dt: timestep to predict for
gravity: 1x1x3 tensor containing the direction of gravity in the same coordinate frame as particles
            maxSpeed: maximum velocity possible for any particle
Returns:
locs: A BxNx3 tensor with the new particle positions
vel: A BxNx3 tensor with the new particle velocities
"""
vel = vel + self.gravity * dt
vel = self._cap_magnitude(vel, self.maxSpeed)
locs = locs + vel * dt
return locs, vel
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'gravity': 4}]
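# Hedged note (added for illustration; helper name is hypothetical):
# _cap_magnitude is an algebraic way of writing min(cap / (norm + 1e-4), 1),
# i.e. rows faster than maxSpeed are rescaled down to roughly maxSpeed and
# slower rows pass through unchanged.
def _cap_magnitude_reference(A, cap):
    norm = A.norm(2, dim=-1, keepdim=True)
    return A * torch.clamp(cap / (norm + 0.0001), max=1.0)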
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_linalg_vector_norm_mul_neg_reciprocal_0(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = 4.0
tmp3 = tmp1 * tmp2
tmp4 = tmp0 + tmp3
tmp5 = tmp4 * tmp4
tmp8 = tmp7 * tmp2
tmp9 = tmp6 + tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp5 + tmp10
tmp14 = tmp13 * tmp2
tmp15 = tmp12 + tmp14
tmp16 = tmp15 * tmp15
tmp17 = tmp11 + tmp16
tmp20 = tmp19 * tmp2
tmp21 = tmp18 + tmp20
tmp22 = tmp21 * tmp21
tmp23 = tmp17 + tmp22
tmp24 = libdevice.sqrt(tmp23)
tmp25 = 0.0001
tmp26 = tmp24 + tmp25
tmp27 = tl.full([1], 1, tl.int32)
tmp28 = tmp27 / tmp26
tmp29 = 3.0
tmp30 = tmp28 * tmp29
tmp31 = -tmp30
tl.store(out_ptr0 + x0, tmp31, xmask)
@triton.jit
def triton_poi_fused_add_mul_neg_relu_sub_1(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp5 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr3 + x2, xmask)
tmp2 = 4.0
tmp3 = tmp1 * tmp2
tmp4 = tmp0 + tmp3
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tmp9 - tmp6
tmp11 = -tmp10
tmp12 = tmp4 * tmp11
tmp14 = tmp12 * tmp1
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + x2, tmp12, xmask)
tl.store(out_ptr1 + x2, tmp15, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_linalg_vector_norm_mul_neg_reciprocal_0[grid(64)](
arg1_1, arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_neg_relu_sub_1[grid(256)](arg1_1, arg0_1,
buf0, arg2_1, buf1, buf2, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del buf0
return buf2, buf1
class FluidGravityForceNew(nn.Module):
def __init__(self, gravity, maxSpeed=3):
"""
Initializes a fluid gravity model.
Arguments:
            gravity: Gravity vector in the global frame (the same frame as the particle locations) for the simulation
maxSpeed: The maximum magnitude of the particle velocities. Higher velocities are clamped.
Previous work used: MAX_VEL = 0.5*0.1*NSUBSTEPS/DT
"""
super(FluidGravityForceNew, self).__init__()
self.gravity = gravity
self.maxSpeed = maxSpeed
self.relu = nn.ReLU()
def _cap_magnitude(self, A, cap):
d = len(A.size())
vv = torch.norm(A, 2, d - 1, keepdim=True)
vv = cap / (vv + 0.0001)
vv = -(self.relu(-vv + 1.0) - 1.0)
return A * vv
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1]
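# Hedged smoke test (added for illustration; assumes a CUDA device and that the
# eager FluidGravityForce and get_inputs from the snippet above are in scope):
# the scalar gravity=4 and maxSpeed=3 from get_init_inputs() are baked into the
# fused kernels, so the Triton version should track the eager module for those
# settings.
def _check_fluid_gravity():
    locs, vel, dt = [t.cuda() for t in get_inputs()]
    ref = FluidGravityForce(gravity=4)(locs, vel, dt)
    opt = FluidGravityForceNew(gravity=4)(locs, vel, dt)
    return all(torch.allclose(r, o, atol=1e-5) for r, o in zip(ref, opt))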
|
ucsdarclab/liquid_reconstruction
|
FluidGravityForce
| false
| 4,469
|
[
"MIT"
] | 0
|
5559edbf71dba05d432d85e7dbbfe3634e650aeb
|
https://github.com/ucsdarclab/liquid_reconstruction/tree/5559edbf71dba05d432d85e7dbbfe3634e650aeb
|
KLDivergence
|
import torch
import torch as th
class KLDivergence(th.nn.Module):
"""
Args:
        min_value (float): the loss is clipped so that values below this
            number do not affect the optimization.
"""
def __init__(self, min_value=0.2):
super(KLDivergence, self).__init__()
self.min_value = min_value
def forward(self, mu, log_sigma):
loss = -0.5 * (1.0 + log_sigma - mu.pow(2) - log_sigma.exp())
loss = loss.mean()
loss = th.max(loss, self.min_value * th.ones_like(loss))
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
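# Hedged note (added for illustration; helper name is hypothetical):
# interpreting log_sigma as log(sigma^2), each element is the closed-form KL
# between N(mu, sigma^2) and N(0, 1),
#   KL = -0.5 * (1 + log(sigma^2) - mu^2 - sigma^2),
# averaged over all elements and floored at min_value.
def _kl_reference(mu, log_sigma, min_value=0.2):
    kl = -0.5 * (1.0 + log_sigma - mu ** 2 - log_sigma.exp())
    return kl.mean().clamp(min=min_value)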
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch as th
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_exp_maximum_mean_mul_pow_sub_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 - tmp4
tmp6 = tl_math.exp(tmp0)
tmp7 = tmp5 - tmp6
tmp8 = -0.5
tmp9 = tmp7 * tmp8
tmp10 = tl.broadcast_to(tmp9, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = 256.0
tmp14 = tmp12 / tmp13
tmp15 = 0.20000000298023224
tmp16 = triton_helpers.maximum(tmp14, tmp15)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp16, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_exp_maximum_mean_mul_pow_sub_0[grid(1)](buf1,
arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class KLDivergenceNew(th.nn.Module):
"""
Args:
        min_value (float): the loss is clipped so that values below this
            number do not affect the optimization.
"""
def __init__(self, min_value=0.2):
super(KLDivergenceNew, self).__init__()
self.min_value = min_value
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
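# Hedged smoke test (added for illustration; assumes a CUDA device and that
# get_inputs from the snippet above is in scope): whatever the inputs, the
# fused kernel floors the mean KL at min_value = 0.2, so the returned scalar
# is never below that value.
def _check_kl_floor():
    a, b = [t.cuda() for t in get_inputs()]
    return float(KLDivergenceNew()(a, b)) >= 0.2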
|
v-a-s-a/diffvg
|
KLDivergence
| false
| 4,470
|
[
"Apache-2.0"
] | 0
|
3685f3d47a5a4e5c76c68643ebf383f809ba59ed
|
https://github.com/v-a-s-a/diffvg/tree/3685f3d47a5a4e5c76c68643ebf383f809ba59ed
|
BMNLoss
|
import torch
import torch.nn.functional as F
import torch.nn as nn
def binary_logistic_regression_loss(reg_score, label, threshold=0.5,
ratio_range=(1.05, 21), eps=1e-05):
"""Binary Logistic Regression Loss."""
label = label.view(-1)
reg_score = reg_score.contiguous().view(-1)
pmask = (label > threshold).float()
num_positive = max(torch.sum(pmask), 1)
num_entries = len(label)
ratio = num_entries / num_positive
ratio = min(max(ratio, ratio_range[0]), ratio_range[1])
coef_0 = 0.5 * ratio / (ratio - 1)
coef_1 = 0.5 * ratio
loss = coef_1 * pmask * torch.log(reg_score + eps) + coef_0 * (1.0 - pmask
) * torch.log(1.0 - reg_score + eps)
loss = -torch.mean(loss)
return loss
class BMNLoss(nn.Module):
"""BMN Loss.
From paper https://arxiv.org/abs/1907.09702,
code https://github.com/JJBOY/BMN-Boundary-Matching-Network.
It will calculate loss for BMN Model. This loss is a weighted sum of
1) temporal evaluation loss based on confidence score of start and
end positions.
2) proposal evaluation regression loss based on confidence scores of
candidate proposals.
3) proposal evaluation classification loss based on classification
results of candidate proposals.
"""
@staticmethod
def tem_loss(pred_start, pred_end, gt_start, gt_end):
"""Calculate Temporal Evaluation Module Loss.
        This function calculates the binary_logistic_regression_loss for start
and end respectively and returns the sum of their losses.
Args:
pred_start (torch.Tensor): Predicted start score by BMN model.
pred_end (torch.Tensor): Predicted end score by BMN model.
gt_start (torch.Tensor): Groundtruth confidence score for start.
gt_end (torch.Tensor): Groundtruth confidence score for end.
Returns:
torch.Tensor: Returned binary logistic loss.
"""
loss_start = binary_logistic_regression_loss(pred_start, gt_start)
loss_end = binary_logistic_regression_loss(pred_end, gt_end)
loss = loss_start + loss_end
return loss
@staticmethod
def pem_reg_loss(pred_score, gt_iou_map, mask,
high_temporal_iou_threshold=0.7, low_temporal_iou_threshold=0.3):
"""Calculate Proposal Evaluation Module Regression Loss.
Args:
pred_score (torch.Tensor): Predicted temporal_iou score by BMN.
gt_iou_map (torch.Tensor): Groundtruth temporal_iou score.
mask (torch.Tensor): Boundary-Matching mask.
high_temporal_iou_threshold (float): Higher threshold of
temporal_iou. Default: 0.7.
            low_temporal_iou_threshold (float): Lower threshold of
temporal_iou. Default: 0.3.
Returns:
            torch.Tensor: Proposal evaluation regression loss.
"""
u_hmask = (gt_iou_map > high_temporal_iou_threshold).float()
u_mmask = ((gt_iou_map <= high_temporal_iou_threshold) & (
gt_iou_map > low_temporal_iou_threshold)).float()
u_lmask = ((gt_iou_map <= low_temporal_iou_threshold) & (gt_iou_map >
0.0)).float()
u_lmask = u_lmask * mask
num_h = torch.sum(u_hmask)
num_m = torch.sum(u_mmask)
num_l = torch.sum(u_lmask)
r_m = num_h / num_m
u_smmask = torch.rand_like(gt_iou_map)
u_smmask = u_mmask * u_smmask
u_smmask = (u_smmask > 1.0 - r_m).float()
r_l = num_h / num_l
u_slmask = torch.rand_like(gt_iou_map)
u_slmask = u_lmask * u_slmask
u_slmask = (u_slmask > 1.0 - r_l).float()
weights = u_hmask + u_smmask + u_slmask
loss = F.mse_loss(pred_score * weights, gt_iou_map * weights)
loss = 0.5 * torch.sum(loss * torch.ones_like(weights)) / torch.sum(
weights)
return loss
@staticmethod
def pem_cls_loss(pred_score, gt_iou_map, mask, threshold=0.9,
ratio_range=(1.05, 21), eps=1e-05):
"""Calculate Proposal Evaluation Module Classification Loss.
Args:
pred_score (torch.Tensor): Predicted temporal_iou score by BMN.
gt_iou_map (torch.Tensor): Groundtruth temporal_iou score.
mask (torch.Tensor): Boundary-Matching mask.
threshold (float): Threshold of temporal_iou for positive
instances. Default: 0.9.
ratio_range (tuple): Lower bound and upper bound for ratio.
Default: (1.05, 21)
eps (float): Epsilon for small value. Default: 1e-5
Returns:
            torch.Tensor: Proposal evaluation classification loss.
"""
pmask = (gt_iou_map > threshold).float()
nmask = (gt_iou_map <= threshold).float()
nmask = nmask * mask
num_positive = max(torch.sum(pmask), 1)
num_entries = num_positive + torch.sum(nmask)
ratio = num_entries / num_positive
ratio = torch.clamp(ratio, ratio_range[0], ratio_range[1])
coef_0 = 0.5 * ratio / (ratio - 1)
coef_1 = 0.5 * ratio
loss_pos = coef_1 * torch.log(pred_score + eps) * pmask
loss_neg = coef_0 * torch.log(1.0 - pred_score + eps) * nmask
loss = -1 * torch.sum(loss_pos + loss_neg) / num_entries
return loss
def forward(self, pred_bm, pred_start, pred_end, gt_iou_map, gt_start,
gt_end, bm_mask, weight_tem=1.0, weight_pem_reg=10.0,
weight_pem_cls=1.0):
"""Calculate Boundary Matching Network Loss.
Args:
pred_bm (torch.Tensor): Predicted confidence score for boundary
matching map.
pred_start (torch.Tensor): Predicted confidence score for start.
pred_end (torch.Tensor): Predicted confidence score for end.
gt_iou_map (torch.Tensor): Groundtruth score for boundary matching
map.
gt_start (torch.Tensor): Groundtruth temporal_iou score for start.
gt_end (torch.Tensor): Groundtruth temporal_iou score for end.
bm_mask (torch.Tensor): Boundary-Matching mask.
weight_tem (float): Weight for tem loss. Default: 1.0.
weight_pem_reg (float): Weight for pem regression loss.
Default: 10.0.
weight_pem_cls (float): Weight for pem classification loss.
Default: 1.0.
Returns:
tuple([torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]):
(loss, tem_loss, pem_reg_loss, pem_cls_loss). Loss is the bmn
loss, tem_loss is the temporal evaluation loss, pem_reg_loss is
the proposal evaluation regression loss, pem_cls_loss is the
proposal evaluation classification loss.
"""
pred_bm_reg = pred_bm[:, 0].contiguous()
pred_bm_cls = pred_bm[:, 1].contiguous()
gt_iou_map = gt_iou_map * bm_mask
pem_reg_loss = self.pem_reg_loss(pred_bm_reg, gt_iou_map, bm_mask)
pem_cls_loss = self.pem_cls_loss(pred_bm_cls, gt_iou_map, bm_mask)
tem_loss = self.tem_loss(pred_start, pred_end, gt_start, gt_end)
loss = (weight_tem * tem_loss + weight_pem_reg * pem_reg_loss +
weight_pem_cls * pem_cls_loss)
return loss, tem_loss, pem_reg_loss, pem_cls_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
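# Illustrative usage sketch (an assumption added for clarity, not from the
# original repo; helper names are made up): run the composite BMN loss on the
# toy [4, 4, 4, 4] tensors from get_inputs() and inspect the four terms.
if __name__ == '__main__':
    _loss_fn = BMNLoss()
    _total, _tem, _pem_reg, _pem_cls = _loss_fn(*get_inputs())
    print(_total.item(), _tem.item(), _pem_reg.item(), _pem_cls.item())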
|
import torch
from torch import device
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__to_copy_add_bitwise_and_clamp_clone_div_gt_le_log_mean_mse_loss_mul_neg_ones_like_reciprocal_rsub_sub_sum_0(
in_out_ptr0, in_out_ptr1, in_out_ptr2, in_out_ptr3, in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr12, xnumel,
rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
r1 = rindex % 16
r2 = rindex // 16 % 4
tmp0 = tl.load(in_ptr0 + r0, None)
tmp19 = tl.load(in_ptr1 + r0, None)
tmp36 = tl.load(in_ptr2 + r0, None)
tmp37 = tl.load(in_ptr3 + r0, None)
tmp62 = tl.load(in_ptr4 + r0, None)
tmp69 = tl.load(in_out_ptr0 + r0, None)
tmp76 = tl.load(in_ptr5 + (r1 + 64 * r2), None, eviction_policy=
'evict_last')
tmp110 = tl.load(in_ptr5 + (16 + r1 + 64 * r2), None, eviction_policy=
'evict_last')
tmp126 = tl.load(in_ptr6 + r0, None)
tmp139 = tl.load(in_ptr7 + r0, None)
tmp1 = 0.5
tmp2 = tmp0 > tmp1
tmp3 = tmp2.to(tl.float32)
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 1.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tl.full([1], 1, tl.int32)
tmp10 = tmp9 / tmp8
tmp11 = 256.0
tmp12 = tmp10 * tmp11
tmp13 = 1.05
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = 21.0
tmp16 = triton_helpers.minimum(tmp14, tmp15)
tmp17 = tmp16 * tmp1
tmp18 = tmp17 * tmp3
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = tl_math.log(tmp21)
tmp23 = tmp18 * tmp22
tmp24 = tmp16 - tmp7
tmp25 = tmp17 / tmp24
tmp26 = tmp7 - tmp3
tmp27 = tmp25 * tmp26
tmp28 = tmp7 - tmp19
tmp29 = tmp28 + tmp20
tmp30 = tl_math.log(tmp29)
tmp31 = tmp27 * tmp30
tmp32 = tmp23 + tmp31
tmp33 = tl.broadcast_to(tmp32, [RBLOCK])
tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0))
tmp38 = tmp36 * tmp37
tmp39 = 0.7
tmp40 = tmp38 > tmp39
tmp41 = tmp40.to(tl.float32)
tmp42 = tl.broadcast_to(tmp41, [RBLOCK])
tmp44 = triton_helpers.promote_to_tensor(tl.sum(tmp42, 0))
tmp45 = tmp38 <= tmp39
tmp46 = 0.3
tmp47 = tmp38 > tmp46
tmp48 = tmp45 & tmp47
tmp49 = tmp48.to(tl.float32)
tmp50 = tl.broadcast_to(tmp49, [RBLOCK])
tmp52 = triton_helpers.promote_to_tensor(tl.sum(tmp50, 0))
tmp53 = tmp38 <= tmp46
tmp54 = 0.0
tmp55 = tmp38 > tmp54
tmp56 = tmp53 & tmp55
tmp57 = tmp56.to(tl.float32)
tmp58 = tmp57 * tmp37
tmp59 = tl.broadcast_to(tmp58, [RBLOCK])
tmp61 = triton_helpers.promote_to_tensor(tl.sum(tmp59, 0))
tmp63 = tmp49 * tmp62
tmp64 = tmp44 / tmp52
tmp65 = tmp7 - tmp64
tmp66 = tmp63 > tmp65
tmp67 = tmp66.to(tl.float32)
tmp68 = tmp41 + tmp67
tmp70 = tmp58 * tmp69
tmp71 = tmp44 / tmp61
tmp72 = tmp7 - tmp71
tmp73 = tmp70 > tmp72
tmp74 = tmp73.to(tl.float32)
tmp75 = tmp68 + tmp74
tmp77 = tmp76 * tmp75
tmp78 = tmp38 * tmp75
tmp79 = tmp77 - tmp78
tmp80 = tmp79 * tmp79
tmp81 = tl.broadcast_to(tmp80, [RBLOCK])
tmp83 = triton_helpers.promote_to_tensor(tl.sum(tmp81, 0))
tmp84 = 0.9
tmp85 = tmp38 > tmp84
tmp86 = tmp85.to(tl.float32)
tmp87 = tl.broadcast_to(tmp86, [RBLOCK])
tmp89 = triton_helpers.promote_to_tensor(tl.sum(tmp87, 0))
tmp90 = tmp38 <= tmp84
tmp91 = tmp90.to(tl.float32)
tmp92 = tmp91 * tmp37
tmp93 = tl.broadcast_to(tmp92, [RBLOCK])
tmp95 = triton_helpers.promote_to_tensor(tl.sum(tmp93, 0))
tmp96 = tl.broadcast_to(tmp75, [RBLOCK])
tmp98 = triton_helpers.promote_to_tensor(tl.sum(tmp96, 0))
tmp99 = tmp83 / tmp11
tmp100 = tmp99 * tmp7
tmp101 = tl.broadcast_to(tmp100, [RBLOCK])
tmp103 = triton_helpers.promote_to_tensor(tl.sum(tmp101, 0))
tmp104 = triton_helpers.maximum(tmp89, tmp7)
tmp105 = tmp104 + tmp95
tmp106 = tmp105 / tmp104
tmp107 = triton_helpers.maximum(tmp106, tmp13)
tmp108 = triton_helpers.minimum(tmp107, tmp15)
tmp109 = tmp108 * tmp1
tmp111 = tmp110 + tmp20
tmp112 = tl_math.log(tmp111)
tmp113 = tmp109 * tmp112
tmp114 = tmp113 * tmp86
tmp115 = tmp108 - tmp7
tmp116 = tmp109 / tmp115
tmp117 = tmp7 - tmp110
tmp118 = tmp117 + tmp20
tmp119 = tl_math.log(tmp118)
tmp120 = tmp116 * tmp119
tmp121 = tmp120 * tmp92
tmp122 = tmp114 + tmp121
tmp123 = tl.broadcast_to(tmp122, [RBLOCK])
tmp125 = triton_helpers.promote_to_tensor(tl.sum(tmp123, 0))
tmp127 = tmp126 > tmp1
tmp128 = tmp127.to(tl.float32)
tmp129 = tl.broadcast_to(tmp128, [RBLOCK])
tmp131 = triton_helpers.promote_to_tensor(tl.sum(tmp129, 0))
tmp132 = triton_helpers.maximum(tmp131, tmp7)
tmp133 = tmp9 / tmp132
tmp134 = tmp133 * tmp11
tmp135 = triton_helpers.maximum(tmp134, tmp13)
tmp136 = triton_helpers.minimum(tmp135, tmp15)
tmp137 = tmp136 * tmp1
tmp138 = tmp137 * tmp128
tmp140 = tmp139 + tmp20
tmp141 = tl_math.log(tmp140)
tmp142 = tmp138 * tmp141
tmp143 = tmp136 - tmp7
tmp144 = tmp137 / tmp143
tmp145 = tmp7 - tmp128
tmp146 = tmp144 * tmp145
tmp147 = tmp7 - tmp139
tmp148 = tmp147 + tmp20
tmp149 = tl_math.log(tmp148)
tmp150 = tmp146 * tmp149
tmp151 = tmp142 + tmp150
tmp152 = tl.broadcast_to(tmp151, [RBLOCK])
tmp154 = triton_helpers.promote_to_tensor(tl.sum(tmp152, 0))
tmp155 = tmp35 / tmp11
tmp156 = -tmp155
tmp157 = tmp154 / tmp11
tmp158 = -tmp157
tmp159 = tmp156 + tmp158
tmp160 = tmp103 * tmp1
tmp161 = tmp160 / tmp98
tmp162 = -1.0
tmp163 = tmp125 * tmp162
tmp164 = tmp163 / tmp105
tmp165 = tmp159 * tmp7
tmp166 = 10.0
tmp167 = tmp161 * tmp166
tmp168 = tmp165 + tmp167
tmp169 = tmp164 * tmp7
tmp170 = tmp168 + tmp169
tl.debug_barrier()
tl.store(in_out_ptr2 + tl.full([1], 0, tl.int32), tmp159, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + tl.full([1], 0, tl.int32), tmp161, None)
tl.debug_barrier()
tl.store(in_out_ptr3 + tl.full([1], 0, tl.int32), tmp164, None)
tl.store(out_ptr12 + tl.full([1], 0, tl.int32), tmp170, None)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg5_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg6_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf11 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch.
float32, device=device(type='cuda', index=0), pin_memory=False)
buf12 = buf11
del buf11
buf7 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch.
float32, device=device(type='cuda', index=0), pin_memory=False)
buf8 = buf7
del buf7
buf2 = empty_strided_cuda((), (), torch.float32)
buf14 = buf12
del buf12
buf15 = empty_strided_cuda((), (), torch.float32)
buf16 = buf15
del buf15
buf22 = empty_strided_cuda((), (), torch.float32)
buf6 = buf2
del buf2
buf18 = buf16
del buf16
buf23 = buf22
del buf22
buf24 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused__to_copy_add_bitwise_and_clamp_clone_div_gt_le_log_mean_mse_loss_mul_neg_ones_like_reciprocal_rsub_sub_sum_0[
grid(1)](buf14, buf18, buf6, buf23, arg4_1, arg3_1, arg1_1,
arg2_1, buf8, arg0_1, arg6_1, arg5_1, buf24, 1, 256, num_warps=
2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
del arg4_1
del arg5_1
del arg6_1
del buf14
del buf8
return buf24, buf6, buf18, buf23
def binary_logistic_regression_loss(reg_score, label, threshold=0.5,
ratio_range=(1.05, 21), eps=1e-05):
"""Binary Logistic Regression Loss."""
label = label.view(-1)
reg_score = reg_score.contiguous().view(-1)
pmask = (label > threshold).float()
num_positive = max(torch.sum(pmask), 1)
num_entries = len(label)
ratio = num_entries / num_positive
ratio = min(max(ratio, ratio_range[0]), ratio_range[1])
coef_0 = 0.5 * ratio / (ratio - 1)
coef_1 = 0.5 * ratio
loss = coef_1 * pmask * torch.log(reg_score + eps) + coef_0 * (1.0 - pmask
) * torch.log(1.0 - reg_score + eps)
loss = -torch.mean(loss)
return loss
class BMNLossNew(nn.Module):
"""BMN Loss.
From paper https://arxiv.org/abs/1907.09702,
code https://github.com/JJBOY/BMN-Boundary-Matching-Network.
    It calculates the loss for the BMN model. This loss is a weighted sum of
1) temporal evaluation loss based on confidence score of start and
end positions.
2) proposal evaluation regression loss based on confidence scores of
candidate proposals.
3) proposal evaluation classification loss based on classification
results of candidate proposals.
"""
@staticmethod
def tem_loss(pred_start, pred_end, gt_start, gt_end):
"""Calculate Temporal Evaluation Module Loss.
        This function calculates the binary_logistic_regression_loss for start
and end respectively and returns the sum of their losses.
Args:
pred_start (torch.Tensor): Predicted start score by BMN model.
pred_end (torch.Tensor): Predicted end score by BMN model.
gt_start (torch.Tensor): Groundtruth confidence score for start.
gt_end (torch.Tensor): Groundtruth confidence score for end.
Returns:
torch.Tensor: Returned binary logistic loss.
"""
loss_start = binary_logistic_regression_loss(pred_start, gt_start)
loss_end = binary_logistic_regression_loss(pred_end, gt_end)
loss = loss_start + loss_end
return loss
@staticmethod
def pem_reg_loss(pred_score, gt_iou_map, mask,
high_temporal_iou_threshold=0.7, low_temporal_iou_threshold=0.3):
"""Calculate Proposal Evaluation Module Regression Loss.
Args:
pred_score (torch.Tensor): Predicted temporal_iou score by BMN.
gt_iou_map (torch.Tensor): Groundtruth temporal_iou score.
mask (torch.Tensor): Boundary-Matching mask.
high_temporal_iou_threshold (float): Higher threshold of
temporal_iou. Default: 0.7.
            low_temporal_iou_threshold (float): Lower threshold of
temporal_iou. Default: 0.3.
Returns:
            torch.Tensor: Proposal evaluation regression loss.
"""
u_hmask = (gt_iou_map > high_temporal_iou_threshold).float()
u_mmask = ((gt_iou_map <= high_temporal_iou_threshold) & (
gt_iou_map > low_temporal_iou_threshold)).float()
u_lmask = ((gt_iou_map <= low_temporal_iou_threshold) & (gt_iou_map >
0.0)).float()
u_lmask = u_lmask * mask
num_h = torch.sum(u_hmask)
num_m = torch.sum(u_mmask)
num_l = torch.sum(u_lmask)
r_m = num_h / num_m
u_smmask = torch.rand_like(gt_iou_map)
u_smmask = u_mmask * u_smmask
u_smmask = (u_smmask > 1.0 - r_m).float()
r_l = num_h / num_l
u_slmask = torch.rand_like(gt_iou_map)
u_slmask = u_lmask * u_slmask
u_slmask = (u_slmask > 1.0 - r_l).float()
weights = u_hmask + u_smmask + u_slmask
loss = F.mse_loss(pred_score * weights, gt_iou_map * weights)
loss = 0.5 * torch.sum(loss * torch.ones_like(weights)) / torch.sum(
weights)
return loss
@staticmethod
def pem_cls_loss(pred_score, gt_iou_map, mask, threshold=0.9,
ratio_range=(1.05, 21), eps=1e-05):
"""Calculate Proposal Evaluation Module Classification Loss.
Args:
pred_score (torch.Tensor): Predicted temporal_iou score by BMN.
gt_iou_map (torch.Tensor): Groundtruth temporal_iou score.
mask (torch.Tensor): Boundary-Matching mask.
threshold (float): Threshold of temporal_iou for positive
instances. Default: 0.9.
ratio_range (tuple): Lower bound and upper bound for ratio.
Default: (1.05, 21)
eps (float): Epsilon for small value. Default: 1e-5
Returns:
            torch.Tensor: Proposal evaluation classification loss.
"""
pmask = (gt_iou_map > threshold).float()
nmask = (gt_iou_map <= threshold).float()
nmask = nmask * mask
num_positive = max(torch.sum(pmask), 1)
num_entries = num_positive + torch.sum(nmask)
ratio = num_entries / num_positive
ratio = torch.clamp(ratio, ratio_range[0], ratio_range[1])
coef_0 = 0.5 * ratio / (ratio - 1)
coef_1 = 0.5 * ratio
loss_pos = coef_1 * torch.log(pred_score + eps) * pmask
loss_neg = coef_0 * torch.log(1.0 - pred_score + eps) * nmask
loss = -1 * torch.sum(loss_pos + loss_neg) / num_entries
return loss
def forward(self, input_0, input_1, input_2, input_3, input_4, input_5,
input_6):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
arg4_1 = input_4
arg5_1 = input_5
arg6_1 = input_6
output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1])
return output[0], output[1], output[2], output[3]
|
scenarios/dev
|
BMNLoss
| false
| 4,471
|
[
"Apache-2.0"
] | 0
|
9f91ebc142cea1c31231d233571ad59460ab6fba
|
https://github.com/scenarios/dev/tree/9f91ebc142cea1c31231d233571ad59460ab6fba
|
MaxPPVPool1d
|
from torch.nn import Module
import torch
import torch.multiprocessing
import torch
class MaxPPVPool1d(Module):
"""Drop-in replacement for AdaptiveConcatPool1d - multiplies nf by 2"""
def forward(self, x):
_max = x.max(dim=-1).values
_ppv = torch.gt(x, 0).sum(dim=-1).float() / x.shape[-1]
return torch.cat((_max, _ppv), dim=-1).unsqueeze(2)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
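# Illustrative sketch (an assumption added for clarity, not from the original
# repo): the layer concatenates the per-position max with the proportion of
# positive values (PPV) along the last dimension, doubling it, then unsqueezes
# dim 2.
if __name__ == '__main__':
    _out = MaxPPVPool1d()(torch.rand([4, 4, 4, 4]))
    print(_out.shape)  # torch.Size([4, 4, 1, 8])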
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn import Module
import torch.multiprocessing
import torch
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x0 + 16 * x1), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x1), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x1), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x1), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp17 = tl.load(in_ptr0 + (4 * (-4 + x0) + 16 * x1), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp18 = 0.0
tmp19 = tmp17 > tmp18
tmp20 = tmp19.to(tl.int64)
tmp21 = tl.load(in_ptr0 + (1 + 4 * (-4 + x0) + 16 * x1), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp22 = tmp21 > tmp18
tmp23 = tmp22.to(tl.int64)
tmp24 = tmp20 + tmp23
tmp25 = tl.load(in_ptr0 + (2 + 4 * (-4 + x0) + 16 * x1), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp26 = tmp25 > tmp18
tmp27 = tmp26.to(tl.int64)
tmp28 = tmp24 + tmp27
tmp29 = tl.load(in_ptr0 + (3 + 4 * (-4 + x0) + 16 * x1), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp30 = tmp29 > tmp18
tmp31 = tmp30.to(tl.int64)
tmp32 = tmp28 + tmp31
tmp33 = tmp32.to(tl.float32)
tmp34 = 0.25
tmp35 = tmp33 * tmp34
tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype)
tmp37 = tl.where(tmp14, tmp35, tmp36)
tmp38 = tl.where(tmp4, tmp13, tmp37)
tl.store(out_ptr0 + x2, tmp38, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](arg0_1, buf0, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 4, 1, 8), (32, 8, 8, 1), 0),
class MaxPPVPool1dNew(Module):
"""Drop-in replacement for AdaptiveConcatPool1d - multiplies nf by 2"""
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
sjdlloyd/tsai
|
MaxPPVPool1d
| false
| 4,472
|
[
"Apache-2.0"
] | 0
|
98d9c02b8429708819d373b475deb9e99f0ab7df
|
https://github.com/sjdlloyd/tsai/tree/98d9c02b8429708819d373b475deb9e99f0ab7df
|
ScoringFunction
|
import torch
import torch.nn as nn
class Conv2dAct(nn.Module):
def __init__(self, in_channels, out_channels, ksize=1, activation='relu'):
super(Conv2dAct, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, ksize)
if activation == 'sigmoid':
self.act = nn.Sigmoid()
elif activation == 'relu':
self.act = nn.ReLU()
elif activation == 'tanh':
self.act = nn.Tanh()
def forward(self, x):
x = self.conv(x)
x = self.act(x)
return x
class VarianceC(nn.Module):
def __init__(self):
super(VarianceC, self).__init__()
def forward(self, x):
mean_x = torch.mean(x, dim=1, keepdim=True)
sub_x = x.sub(mean_x)
x = torch.mean(torch.mul(sub_x, sub_x), dim=1, keepdim=True)
return x
class ScoringFunction(nn.Module):
def __init__(self, in_channels, var=False):
super(ScoringFunction, self).__init__()
if var:
self.reduce_channel = VarianceC()
else:
self.reduce_channel = Conv2dAct(in_channels, 1, 1, 'sigmoid')
def forward(self, x):
x = self.reduce_channel(x)
x = x.view(x.size(0), -1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4}]
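# Illustrative sketch (an assumption added for clarity, not from the original
# repo): with var=False the channels are reduced by a 1x1 sigmoid convolution,
# so the flattened per-sample scores lie in (0, 1); var=True uses the channel
# variance instead.
if __name__ == '__main__':
    _scores = ScoringFunction(in_channels=4)(torch.rand([4, 4, 4, 4]))
    print(_scores.shape, float(_scores.min()), float(_scores.max()))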
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_sigmoid_sigmoid_backward_0(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tmp5 = 1.0
tmp6 = tmp5 - tmp4
tmp7 = tmp4 * tmp6
tl.store(in_out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1))
buf1 = reinterpret_tensor(buf0, (4, 1, 4, 4), (16, 1, 4, 1), 0)
del buf0
buf2 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_sigmoid_sigmoid_backward_0[grid(64)](buf1,
primals_2, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
return reinterpret_tensor(buf1, (4, 16), (16, 1), 0
), primals_1, primals_3, buf2
class Conv2dAct(nn.Module):
def __init__(self, in_channels, out_channels, ksize=1, activation='relu'):
super(Conv2dAct, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, ksize)
if activation == 'sigmoid':
self.act = nn.Sigmoid()
elif activation == 'relu':
self.act = nn.ReLU()
elif activation == 'tanh':
self.act = nn.Tanh()
def forward(self, x):
x = self.conv(x)
x = self.act(x)
return x
class VarianceC(nn.Module):
def __init__(self):
super(VarianceC, self).__init__()
def forward(self, x):
mean_x = torch.mean(x, dim=1, keepdim=True)
sub_x = x.sub(mean_x)
x = torch.mean(torch.mul(sub_x, sub_x), dim=1, keepdim=True)
return x
class ScoringFunctionNew(nn.Module):
def __init__(self, in_channels, var=False):
super(ScoringFunctionNew, self).__init__()
if var:
self.reduce_channel = VarianceC()
else:
self.reduce_channel = Conv2dAct(in_channels, 1, 1, 'sigmoid')
def forward(self, input_0):
primals_1 = self.reduce_channel.conv.weight
primals_2 = self.reduce_channel.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
sunwhawhang/headpose-fsanet-pytorch
|
ScoringFunction
| false
| 4,473
|
[
"MIT"
] | 0
|
d37d39dbff649b2f607367f35d9eadba2fea18f7
|
https://github.com/sunwhawhang/headpose-fsanet-pytorch/tree/d37d39dbff649b2f607367f35d9eadba2fea18f7
|
CrossEntropy
|
import torch
import torchvision.transforms.functional as F
import torch.nn as nn
import torch.nn.functional as F
class CrossEntropy(nn.Module):
def forward(self, x, y):
return F.cross_entropy(x, torch.argmax(y, -1))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
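# Illustrative sketch (an assumption added for clarity, not from the original
# repo): the target carries per-class scores in its last dimension, so
# argmax(y, -1) converts it to integer labels before the standard
# cross-entropy on the logits x.
if __name__ == '__main__':
    _x, _y = get_inputs()
    print(CrossEntropy()(_x, _y).item())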
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused_argmax_nll_loss2d_forward_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
r1 = rindex % 16
r2 = rindex // 16
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp56 = tl.load(in_ptr1 + (r1 + 64 * r2), None)
tmp58 = tl.load(in_ptr1 + (16 + r1 + 64 * r2), None)
tmp61 = tl.load(in_ptr1 + (32 + r1 + 64 * r2), None)
tmp64 = tl.load(in_ptr1 + (48 + r1 + 64 * r2), None)
tmp2 = tmp0 > tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1, 1], 0, tl.int64)
tmp11 = tl.full([1, 1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tmp15 = tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp18 = tmp15 > tmp17
tmp19 = tmp15 == tmp17
tmp20 = tmp15 != tmp15
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1, 1], 2, tl.int64)
tmp27 = tmp16 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp23 | tmp28
tmp30 = tl.where(tmp29, tmp15, tmp17)
tmp31 = tl.where(tmp29, tmp16, tmp26)
tmp33 = tmp30 > tmp32
tmp34 = tmp30 == tmp32
tmp35 = tmp30 != tmp30
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1, 1], 3, tl.int64)
tmp42 = tmp31 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tl.where(tmp44, tmp30, tmp32)
tmp46 = tl.where(tmp44, tmp31, tmp41)
tmp47 = tl.full([1, 1], -100, tl.int64)
tmp48 = tmp46 != tmp47
tmp49 = tl.where(tmp48, tmp46, tmp10)
tmp50 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp51 = tmp49 + tmp50
tmp52 = tmp49 < 0
tmp53 = tl.where(tmp52, tmp51, tmp49)
tl.device_assert((0 <= tmp53) & (tmp53 < 4),
'index out of bounds: 0 <= tmp53 < 4')
tmp55 = tl.load(in_ptr1 + (r1 + 16 * tmp53 + 64 * r2), None)
tmp57 = tl_math.exp(tmp56)
tmp59 = tl_math.exp(tmp58)
tmp60 = tmp57 + tmp59
tmp62 = tl_math.exp(tmp61)
tmp63 = tmp60 + tmp62
tmp65 = tl_math.exp(tmp64)
tmp66 = tmp63 + tmp65
tmp67 = tl_math.log(tmp66)
tmp68 = tmp55 - tmp67
tmp69 = -tmp68
tmp70 = 0.0
tmp71 = tl.where(tmp48, tmp69, tmp70)
tmp72 = tl.broadcast_to(tmp71, [XBLOCK, RBLOCK])
tmp74 = tl.sum(tmp72, 1)[:, None]
tmp75 = tmp48.to(tl.int64)
tmp76 = tl.broadcast_to(tmp75, [XBLOCK, RBLOCK])
tmp78 = tl.sum(tmp76, 1)[:, None]
tmp79 = tmp78.to(tl.float32)
tmp80 = tmp74 / tmp79
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp80, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf4 = buf2
del buf2
triton_per_fused_argmax_nll_loss2d_forward_1[grid(1)](buf4, arg0_1,
buf1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del buf1
return buf4,
class CrossEntropyNew(nn.Module):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
tgxs002/1-stage-wseg
|
CrossEntropy
| false
| 4,474
|
[
"Apache-2.0"
] | 0
|
de16c51cc6cf8cd0ef248145980434d5f6104910
|
https://github.com/tgxs002/1-stage-wseg/tree/de16c51cc6cf8cd0ef248145980434d5f6104910
|
Gaussian
|
import torch
from torch import nn
from torch.nn import functional as F
import torch.utils.data
class Gaussian(nn.Module):
def __init__(self, in_dim, z_dim):
super(Gaussian, self).__init__()
self.mu = nn.Linear(in_dim, z_dim)
self.var = nn.Linear(in_dim, z_dim)
def reparameterize(self, mu, var):
std = torch.sqrt(var + 1e-10)
noise = torch.randn_like(std)
z = mu + noise * std
return z
def forward(self, x):
mu = self.mu(x)
var = F.softplus(self.var(x))
z = self.reparameterize(mu, var)
return mu, var, z
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'z_dim': 4}]
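# Illustrative sketch (an assumption added for clarity, not from the original
# repo): the module returns a mean, a softplus-positive variance, and a sample
# z drawn via the reparameterisation trick z = mu + noise * sqrt(var + 1e-10).
if __name__ == '__main__':
    _mu, _var, _z = Gaussian(in_dim=4, z_dim=4)(torch.rand([4, 4, 4, 4]))
    print(_mu.shape, bool((_var > 0).all()), _z.shape)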
|
import torch
from torch import device
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_softplus_sqrt_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp6 = tl.load(in_ptr1 + x0, xmask)
tmp7 = tl.load(in_ptr2 + x0, xmask)
tmp1 = 20.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = libdevice.log1p(tmp3)
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp8 = 1e-10
tmp9 = tmp5 + tmp8
tmp10 = libdevice.sqrt(tmp9)
tmp11 = tmp7 * tmp10
tmp12 = tmp6 + tmp11
tl.store(out_ptr0 + x0, tmp5, xmask)
tl.store(out_ptr1 + x0, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf3 = torch.ops.aten.randn.default([4, 4, 4, 4], dtype=torch.
float32, device=device(type='cuda', index=0), pin_memory=False)
buf4 = buf3
del buf3
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_softplus_sqrt_0[grid(256)](buf1, buf0,
buf4, buf2, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1)
return reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf2, buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, buf2, buf4
class GaussianNew(nn.Module):
def __init__(self, in_dim, z_dim):
super(GaussianNew, self).__init__()
self.mu = nn.Linear(in_dim, z_dim)
self.var = nn.Linear(in_dim, z_dim)
def reparameterize(self, mu, var):
std = torch.sqrt(var + 1e-10)
noise = torch.randn_like(std)
z = mu + noise * std
return z
def forward(self, input_0):
primals_1 = self.mu.weight
primals_2 = self.mu.bias
primals_4 = self.var.weight
primals_5 = self.var.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1], output[2]
|
userVector/GMVAE
|
Gaussian
| false
| 4,475
|
[
"MIT"
] | 0
|
2d0330c4174aa614f3817888798f88798313e01f
|
https://github.com/userVector/GMVAE/tree/2d0330c4174aa614f3817888798f88798313e01f
|
VarianceC
|
import torch
import torch.nn as nn
class VarianceC(nn.Module):
def __init__(self):
super(VarianceC, self).__init__()
def forward(self, x):
mean_x = torch.mean(x, dim=1, keepdim=True)
sub_x = x.sub(mean_x)
x = torch.mean(torch.mul(sub_x, sub_x), dim=1, keepdim=True)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
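# Illustrative sketch (an assumption added for clarity, not from the original
# repo): VarianceC computes the biased variance across the channel dimension.
if __name__ == '__main__':
    _x = torch.rand([4, 4, 4, 4])
    _v = VarianceC()(_x)
    print(_v.shape, torch.allclose(_v, _x.var(dim=1, unbiased=False,
        keepdim=True)))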
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_mul_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tl.store(out_ptr0 + x2, tmp20, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_mul_sub_0[grid(64)](arg0_1, buf0, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del arg0_1
return buf0,
class VarianceCNew(nn.Module):
def __init__(self):
super(VarianceCNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
sunwhawhang/headpose-fsanet-pytorch
|
VarianceC
| false
| 4,476
|
[
"MIT"
] | 0
|
d37d39dbff649b2f607367f35d9eadba2fea18f7
|
https://github.com/sunwhawhang/headpose-fsanet-pytorch/tree/d37d39dbff649b2f607367f35d9eadba2fea18f7
|
ToyRes
|
import torch
import torch.nn as nn
import torch.multiprocessing
class ToyResLayer(nn.Module):
""" Custom Linear layer but mimics a standard linear layer """
def __init__(self):
super().__init__()
aprime = torch.Tensor(1)
bprime = torch.Tensor(1)
self.aprime = nn.Parameter(aprime)
self.bprime = nn.Parameter(bprime)
nn.init.uniform_(self.aprime)
nn.init.uniform_(self.bprime)
def forward(self, x):
w = self.aprime ** 3 * (self.aprime - 3 * self.bprime + 27 * self.
bprime ** 3)
return x * w
class ToyRes(nn.Module):
def __init__(self):
super().__init__()
self.ToyResLayer = ToyResLayer()
def forward(self, x):
return self.ToyResLayer(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
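# Illustrative sketch (an assumption added for clarity, not from the original
# repo): the layer is an elementwise scaling x * w with the learned scalar
# w = aprime**3 * (aprime - 3 * bprime + 27 * bprime**3).
if __name__ == '__main__':
    _m = ToyRes()
    _x = torch.rand([4, 4, 4, 4])
    _a, _b = _m.ToyResLayer.aprime, _m.ToyResLayer.bprime
    _w = _a ** 3 * (_a - 3 * _b + 27 * _b ** 3)
    print(torch.allclose(_m(_x), _x * _w))  # True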
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.multiprocessing
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_pow_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp5 = tl.load(in_ptr2 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp3 = tmp2 * tmp2
tmp4 = tmp3 * tmp2
tmp7 = 3.0
tmp8 = tmp6 * tmp7
tmp9 = tmp2 - tmp8
tmp10 = tmp6 * tmp6
tmp11 = tmp10 * tmp6
tmp12 = 27.0
tmp13 = tmp11 * tmp12
tmp14 = tmp9 + tmp13
tmp15 = tmp4 * tmp14
tmp16 = tmp0 * tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1,), (1,))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_pow_sub_0[grid(256)](primals_3, primals_1,
primals_2, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
return buf0, primals_1, primals_2, primals_3
class ToyResLayer(nn.Module):
""" Custom Linear layer but mimics a standard linear layer """
def __init__(self):
super().__init__()
aprime = torch.Tensor(1)
bprime = torch.Tensor(1)
self.aprime = nn.Parameter(aprime)
self.bprime = nn.Parameter(bprime)
nn.init.uniform_(self.aprime)
nn.init.uniform_(self.bprime)
def forward(self, x):
w = self.aprime ** 3 * (self.aprime - 3 * self.bprime + 27 * self.
bprime ** 3)
return x * w
class ToyResNew(nn.Module):
def __init__(self):
super().__init__()
self.ToyResLayer = ToyResLayer()
def forward(self, input_0):
primals_1 = self.ToyResLayer.aprime
primals_2 = self.ToyResLayer.bprime
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
suswei/RLCT
|
ToyRes
| false
| 4,477
|
[
"MIT"
] | 0
|
e9e04ca5e64250dfbb94134ec5283286dcdc4358
|
https://github.com/suswei/RLCT/tree/e9e04ca5e64250dfbb94134ec5283286dcdc4358
|
Tanh
|
import torch
import torch.nn as nn
import torch.multiprocessing
class Tanh(nn.Module):
def __init__(self, input_dim, output_dim, H):
super(Tanh, self).__init__()
self.fc1 = nn.Linear(input_dim, H, bias=False)
self.fc2 = nn.Linear(H, output_dim, bias=False)
def forward(self, x):
x = torch.tanh(self.fc1(x))
x = self.fc2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4, 'H': 4}]
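# Illustrative sketch (an assumption added for clarity, not from the original
# repo): a bias-free two-layer network with a tanh hidden activation; only the
# last dimension of the input has to equal input_dim.
if __name__ == '__main__':
    _out = Tanh(input_dim=4, output_dim=4, H=4)(torch.rand([4, 4, 4, 4]))
    print(_out.shape)  # torch.Size([4, 4, 4, 4])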
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.multiprocessing
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(256)](buf1, 256, XBLOCK=128, num_warps
=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf2)
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf1, primals_3
class TanhNew(nn.Module):
def __init__(self, input_dim, output_dim, H):
super(TanhNew, self).__init__()
self.fc1 = nn.Linear(input_dim, H, bias=False)
self.fc2 = nn.Linear(H, output_dim, bias=False)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_3 = self.fc2.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
suswei/RLCT
|
Tanh
| false
| 4,478
|
[
"MIT"
] | 0
|
e9e04ca5e64250dfbb94134ec5283286dcdc4358
|
https://github.com/suswei/RLCT/tree/e9e04ca5e64250dfbb94134ec5283286dcdc4358
|
GaussianMixtureReconstructionLoss
|
import torch
import numpy as np
import torch as th
def gaussian_pdfs(dx, dy, params):
"""Returns the pdf at (dx, dy) for each Gaussian in the mixture.
"""
dx = dx.unsqueeze(-1)
dy = dy.unsqueeze(-1)
mu_x = params[..., 0]
mu_y = params[..., 1]
sigma_x = params[..., 2].exp()
sigma_y = params[..., 3].exp()
rho_xy = th.tanh(params[..., 4])
x = ((dx - mu_x) / sigma_x).pow(2)
y = ((dy - mu_y) / sigma_y).pow(2)
xy = (dx - mu_x) * (dy - mu_y) / (sigma_x * sigma_y)
arg = x + y - 2.0 * rho_xy * xy
pdf = th.exp(-arg / (2 * (1.0 - rho_xy.pow(2))))
norm = 2.0 * np.pi * sigma_x * sigma_y * (1.0 - rho_xy.pow(2)).sqrt()
return pdf / norm
class GaussianMixtureReconstructionLoss(th.nn.Module):
"""
Args:
"""
def __init__(self, eps=1e-05):
super(GaussianMixtureReconstructionLoss, self).__init__()
self.eps = eps
def forward(self, pen_logits, mixture_logits, gaussian_params, targets):
dx = targets[..., 0]
dy = targets[..., 1]
pen_state = targets[..., 2:].argmax(-1)
valid_stroke = (targets[..., -1] != 1.0).float()
mixture_weights = th.nn.functional.softmax(mixture_logits, -1)
pdfs = gaussian_pdfs(dx, dy, gaussian_params)
position_loss = -th.log(self.eps + (pdfs * mixture_weights).sum(-1))
position_loss = (position_loss * valid_stroke).sum(
) / valid_stroke.sum()
pen_loss = th.nn.functional.cross_entropy(pen_logits.view(-1, 3),
pen_state.view(-1))
return position_loss + pen_loss
def get_inputs():
return [torch.rand([4, 3]), torch.rand([4, 4]), torch.rand([4, 5]),
torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
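# Illustrative sketch (an assumption added for clarity, not from the original
# repo): the forward pass takes (pen_logits, mixture_logits, gaussian_params,
# targets), where targets packs (dx, dy, pen one-hot) and gaussian_params
# holds five values (mu_x, mu_y, log sigma_x, log sigma_y, rho) per component.
if __name__ == '__main__':
    _loss = GaussianMixtureReconstructionLoss()(*get_inputs())
    print(_loss.item())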
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import numpy as np
import torch as th
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_add_div_exp_mul_neg_pow_rsub_sqrt_sub_tanh_1(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 5 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (2 + 5 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (1 + 5 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + 5 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr1 + (4 + 5 * x0), xmask, eviction_policy='evict_last'
)
tmp37 = tl.load(in_ptr2 + x2, xmask)
tmp38 = tl.load(in_ptr2 + 4 * x1, xmask, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr2 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp41 = tl.load(in_ptr2 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp43 = tl.load(in_ptr2 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 - tmp1
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 / tmp4
tmp6 = tmp5 * tmp5
tmp9 = tmp7 - tmp8
tmp11 = tl_math.exp(tmp10)
tmp12 = tmp9 / tmp11
tmp13 = tmp12 * tmp12
tmp14 = tmp6 + tmp13
tmp16 = libdevice.tanh(tmp15)
tmp17 = 2.0
tmp18 = tmp16 * tmp17
tmp19 = tmp2 * tmp9
tmp20 = tmp4 * tmp11
tmp21 = tmp19 / tmp20
tmp22 = tmp18 * tmp21
tmp23 = tmp14 - tmp22
tmp24 = -tmp23
tmp25 = tmp16 * tmp16
tmp26 = 1.0
tmp27 = tmp26 - tmp25
tmp28 = tmp27 * tmp17
tmp29 = tmp24 / tmp28
tmp30 = tl_math.exp(tmp29)
tmp31 = 6.283185307179586
tmp32 = tmp4 * tmp31
tmp33 = tmp32 * tmp11
tmp34 = libdevice.sqrt(tmp27)
tmp35 = tmp33 * tmp34
tmp36 = tmp30 / tmp35
tmp40 = tmp38 + tmp39
tmp42 = tmp40 + tmp41
tmp44 = tmp42 + tmp43
tmp45 = tmp37 / tmp44
tmp46 = tmp36 * tmp45
tl.store(in_out_ptr0 + x2, tmp46, xmask)
@triton.jit
def triton_per_fused__log_softmax__to_copy_add_argmax_div_log_mul_ne_neg_nll_loss_forward_sum_view_2(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.
constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr1 + 3 * r0, None, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (1 + 3 * r0), None, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr1 + (2 + 3 * r0), None, eviction_policy='evict_last')
tmp42 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last')
tmp43 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp45 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr2 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp2 = tmp0 > tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1, 1], 0, tl.int64)
tmp11 = tl.full([1, 1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp17 = tl.full([1, 1], -100, tl.int64)
tmp18 = tmp16 != tmp17
tmp19 = tl.where(tmp18, tmp16, tmp10)
tmp20 = tl.full([XBLOCK, RBLOCK], 3, tl.int32)
tmp21 = tmp19 + tmp20
tmp22 = tmp19 < 0
tmp23 = tl.where(tmp22, tmp21, tmp19)
tl.device_assert((0 <= tmp23) & (tmp23 < 3),
'index out of bounds: 0 <= tmp23 < 3')
tmp25 = tl.load(in_ptr1 + (tmp23 + 3 * r0), None, eviction_policy=
'evict_last')
tmp28 = triton_helpers.maximum(tmp26, tmp27)
tmp30 = triton_helpers.maximum(tmp28, tmp29)
tmp31 = tmp25 - tmp30
tmp32 = tmp26 - tmp30
tmp33 = tl_math.exp(tmp32)
tmp34 = tmp27 - tmp30
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tmp37 = tmp29 - tmp30
tmp38 = tl_math.exp(tmp37)
tmp39 = tmp36 + tmp38
tmp40 = tl_math.log(tmp39)
tmp41 = tmp31 - tmp40
tmp44 = tmp42 + tmp43
tmp46 = tmp44 + tmp45
tmp48 = tmp46 + tmp47
tmp49 = 1e-05
tmp50 = tmp48 + tmp49
tmp51 = tl_math.log(tmp50)
tmp52 = -tmp51
tmp53 = 1.0
tmp54 = tmp1 != tmp53
tmp55 = tmp54.to(tl.float32)
tmp56 = tmp52 * tmp55
tmp57 = tl.broadcast_to(tmp56, [XBLOCK, RBLOCK])
tmp59 = tl.sum(tmp57, 1)[:, None]
tmp60 = tl.broadcast_to(tmp55, [XBLOCK, RBLOCK])
tmp62 = tl.sum(tmp60, 1)[:, None]
tmp63 = -tmp41
tmp64 = 0.0
tmp65 = tl.where(tmp18, tmp63, tmp64)
tmp66 = tl.broadcast_to(tmp65, [XBLOCK, RBLOCK])
tmp68 = tl.sum(tmp66, 1)[:, None]
tmp69 = tmp18.to(tl.int64)
tmp70 = tl.broadcast_to(tmp69, [XBLOCK, RBLOCK])
tmp72 = tl.sum(tmp70, 1)[:, None]
tmp73 = tmp59 / tmp62
tmp74 = tmp72.to(tl.float32)
tmp75 = tmp68 / tmp74
tmp76 = tmp73 + tmp75
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp76, None)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 5), (5, 1))
assert_size_stride(arg3_1, (4, 3), (3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(16)](arg1_1, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg1_1
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf2 = buf0
del buf0
triton_poi_fused__softmax_add_div_exp_mul_neg_pow_rsub_sqrt_sub_tanh_1[
grid(16)](buf2, arg0_1, arg2_1, buf1, 16, XBLOCK=16, num_warps=
1, num_stages=1)
del arg2_1
del buf1
buf3 = empty_strided_cuda((), (), torch.float32)
buf8 = buf3
del buf3
triton_per_fused__log_softmax__to_copy_add_argmax_div_log_mul_ne_neg_nll_loss_forward_sum_view_2[
grid(1)](buf8, arg0_1, arg3_1, buf2, 1, 4, XBLOCK=1, num_warps=
2, num_stages=1)
del arg0_1
del arg3_1
del buf2
return buf8,
def gaussian_pdfs(dx, dy, params):
"""Returns the pdf at (dx, dy) for each Gaussian in the mixture.
"""
dx = dx.unsqueeze(-1)
dy = dy.unsqueeze(-1)
mu_x = params[..., 0]
mu_y = params[..., 1]
sigma_x = params[..., 2].exp()
sigma_y = params[..., 3].exp()
rho_xy = th.tanh(params[..., 4])
x = ((dx - mu_x) / sigma_x).pow(2)
y = ((dy - mu_y) / sigma_y).pow(2)
xy = (dx - mu_x) * (dy - mu_y) / (sigma_x * sigma_y)
arg = x + y - 2.0 * rho_xy * xy
pdf = th.exp(-arg / (2 * (1.0 - rho_xy.pow(2))))
norm = 2.0 * np.pi * sigma_x * sigma_y * (1.0 - rho_xy.pow(2)).sqrt()
return pdf / norm
class GaussianMixtureReconstructionLossNew(th.nn.Module):
"""
Args:
"""
def __init__(self, eps=1e-05):
super(GaussianMixtureReconstructionLossNew, self).__init__()
self.eps = eps
def forward(self, input_0, input_1, input_2, input_3):
arg3_1 = input_0
arg0_1 = input_1
arg2_1 = input_2
arg1_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
|
v-a-s-a/diffvg
|
GaussianMixtureReconstructionLoss
| false
| 4,479
|
[
"Apache-2.0"
] | 0
|
3685f3d47a5a4e5c76c68643ebf383f809ba59ed
|
https://github.com/v-a-s-a/diffvg/tree/3685f3d47a5a4e5c76c68643ebf383f809ba59ed
|
PixelNorm
|
import torch
import torch.nn as nn
import torch.utils.cpp_extension
import torch.utils.data.distributed
class PixelNorm(nn.Module):
def __init__(self, dim):
super().__init__()
def forward(self, input):
return input * torch.rsqrt(torch.mean(input ** 2, dim=2, keepdim=
True) + 1e-08)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
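# Illustrative sketch (an assumption added for clarity, not from the original
# repo): the `dim` constructor argument is unused; the input is always divided
# by the root-mean-square over dim=2 (with 1e-08 added inside the square root).
if __name__ == '__main__':
    _x = torch.rand([4, 4, 4, 4])
    _y = PixelNorm(dim=4)(_x)
    _ref = _x / torch.sqrt((_x ** 2).mean(dim=2, keepdim=True) + 1e-08)
    print(torch.allclose(_y, _ref))  # True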
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.cpp_extension
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mean_mul_pow_rsqrt_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = 1e-08
tmp15 = tmp13 + tmp14
tmp16 = libdevice.rsqrt(tmp15)
tmp17 = tmp0 * tmp16
tl.store(out_ptr0 + x3, tmp17, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mean_mul_pow_rsqrt_0[grid(256)](arg0_1, buf0,
256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class PixelNormNew(nn.Module):
def __init__(self, dim):
super().__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Pragyanstha/SummerCamp2021
|
PixelNorm
| false
| 4,480
|
[
"MIT"
] | 0
|
caa8bba64020ba52bdef2b23a7a54de93e93b8af
|
https://github.com/Pragyanstha/SummerCamp2021/tree/caa8bba64020ba52bdef2b23a7a54de93e93b8af
|
UpsampleConv2d
|
from torch.nn import Module
import math
import torch
from torchvision.datasets import *
import torch.nn.functional as F
from torch.nn import Parameter
from torch.nn.modules.utils import _pair
from torchvision.transforms import *
class UpsampleConv2d(Module):
"""
To avoid the checkerboard artifacts of standard Fractionally-strided Convolution,
    we adapt an integer stride convolution but produce :math:`2\\times 2` outputs for
each convolutional window.
.. image:: _static/img/upconv.png
:width: 50%
:align: center
Reference:
Hang Zhang and Kristin Dana. "Multi-style Generative Network for Real-time Transfer."
*arXiv preprint arXiv:1703.06953 (2017)*
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
output_padding (int or tuple, optional): Zero-padding added to one side of the output.
Default: 0
groups (int, optional): Number of blocked connections from input channels to output
channels. Default: 1
bias (bool, optional): If True, adds a learnable bias to the output. Default: True
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
scale_factor (int): scaling factor for upsampling convolution. Default: 1
Shape:
- Input: :math:`(N, C_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C_{out}, H_{out}, W_{out})` where
:math:`H_{out} = scale * (H_{in} - 1) * stride[0] - 2 * padding[0] + kernel\\_size[0] + output\\_padding[0]`
:math:`W_{out} = scale * (W_{in} - 1) * stride[1] - 2 * padding[1] + kernel\\_size[1] + output\\_padding[1]`
Attributes:
weight (Tensor): the learnable weights of the module of shape
(in_channels, scale * scale * out_channels, kernel_size[0], kernel_size[1])
bias (Tensor): the learnable bias of the module of shape (scale * scale * out_channels)
Examples:
>>> # With square kernels and equal stride
        >>> m = nn.UpsampleConv2d(16, 33, 3, stride=2)
>>> # non-square kernels and unequal stride and with padding
        >>> m = nn.UpsampleConv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
>>> input = autograd.Variable(torch.randn(20, 16, 50, 100))
>>> output = m(input)
>>> # exact output size can be also specified as an argument
>>> input = autograd.Variable(torch.randn(1, 16, 12, 12))
>>> downsample = nn.Conv2d(16, 16, 3, stride=2, padding=1)
        >>> upsample = nn.UpsampleConv2d(16, 16, 3, stride=2, padding=1)
>>> h = downsample(input)
>>> h.size()
torch.Size([1, 16, 6, 6])
>>> output = upsample(h, output_size=input.size())
>>> output.size()
torch.Size([1, 16, 12, 12])
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, scale_factor=1, bias=True):
super(UpsampleConv2d, self).__init__()
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
if in_channels % groups != 0:
raise ValueError('in_channels must be divisible by groups')
if out_channels % groups != 0:
raise ValueError('out_channels must be divisible by groups')
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.scale_factor = scale_factor
self.weight = Parameter(torch.Tensor(out_channels * scale_factor *
scale_factor, in_channels // groups, *kernel_size))
if bias:
self.bias = Parameter(torch.Tensor(out_channels * scale_factor *
scale_factor))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = 1.0 / math.sqrt(n)
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input):
out = F.conv2d(input, self.weight, self.bias, self.stride, self.
padding, self.dilation, self.groups)
return F.pixel_shuffle(out, self.scale_factor)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
import math
from torchvision.datasets import *
from torch.nn import Parameter
from torch.nn.modules.utils import _pair
from torchvision.transforms import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 16, 16), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16)](buf1, primals_2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0
), primals_1, primals_3
class UpsampleConv2dNew(Module):
"""
To avoid the checkerboard artifacts of standard Fractionally-strided Convolution,
    we adopt an integer-stride convolution that produces :math:`2\\times 2` outputs for
    each convolutional window.
.. image:: _static/img/upconv.png
:width: 50%
:align: center
Reference:
Hang Zhang and Kristin Dana. "Multi-style Generative Network for Real-time Transfer."
*arXiv preprint arXiv:1703.06953 (2017)*
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
output_padding (int or tuple, optional): Zero-padding added to one side of the output.
Default: 0
groups (int, optional): Number of blocked connections from input channels to output
channels. Default: 1
bias (bool, optional): If True, adds a learnable bias to the output. Default: True
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
scale_factor (int): scaling factor for upsampling convolution. Default: 1
Shape:
- Input: :math:`(N, C_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C_{out}, H_{out}, W_{out})` where
:math:`H_{out} = scale * (H_{in} - 1) * stride[0] - 2 * padding[0] + kernel\\_size[0] + output\\_padding[0]`
:math:`W_{out} = scale * (W_{in} - 1) * stride[1] - 2 * padding[1] + kernel\\_size[1] + output\\_padding[1]`
Attributes:
weight (Tensor): the learnable weights of the module of shape
            (scale * scale * out_channels, in_channels // groups, kernel_size[0], kernel_size[1])
bias (Tensor): the learnable bias of the module of shape (scale * scale * out_channels)
Examples:
>>> # With square kernels and equal stride
        >>> m = nn.UpsampleConv2d(16, 33, 3, stride=2)
>>> # non-square kernels and unequal stride and with padding
        >>> m = nn.UpsampleConv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
>>> input = autograd.Variable(torch.randn(20, 16, 50, 100))
>>> output = m(input)
>>> # exact output size can be also specified as an argument
>>> input = autograd.Variable(torch.randn(1, 16, 12, 12))
>>> downsample = nn.Conv2d(16, 16, 3, stride=2, padding=1)
        >>> upsample = nn.UpsampleConv2d(16, 16, 3, stride=2, padding=1)
>>> h = downsample(input)
>>> h.size()
torch.Size([1, 16, 6, 6])
>>> output = upsample(h, output_size=input.size())
>>> output.size()
torch.Size([1, 16, 12, 12])
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, scale_factor=1, bias=True):
super(UpsampleConv2dNew, self).__init__()
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
if in_channels % groups != 0:
raise ValueError('in_channels must be divisible by groups')
if out_channels % groups != 0:
raise ValueError('out_channels must be divisible by groups')
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.scale_factor = scale_factor
self.weight = Parameter(torch.Tensor(out_channels * scale_factor *
scale_factor, in_channels // groups, *kernel_size))
if bias:
self.bias = Parameter(torch.Tensor(out_channels * scale_factor *
scale_factor))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = 1.0 / math.sqrt(n)
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
tousifulhaque/DANet
|
UpsampleConv2d
| false
| 4,481
|
[
"MIT"
] | 0
|
1a0c91f0e551a071b5e335b4157313780a8a1b1a
|
https://github.com/tousifulhaque/DANet/tree/1a0c91f0e551a071b5e335b4157313780a8a1b1a
|
Quantize
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Quantize(nn.Module):
def __init__(self, emb_dim, emb_size, decay=0.99, eps=1e-05, ema_flag=
False, bdt_flag=False):
super().__init__()
self.emb_dim = emb_dim
self.emb_size = emb_size
self.ema_flag = ema_flag
self.bdt_flag = bdt_flag
self.embedding = nn.Embedding(emb_size, emb_dim)
self.embedding.weight.data.uniform_(-1.0 / self.emb_size, 1.0 /
self.emb_size)
if self.ema_flag:
self.decay = decay
self.eps = eps
embed = torch.randn(emb_dim, emb_size)
self.register_buffer('ema_size', torch.zeros(emb_size))
self.register_buffer('ema_w', embed.clone())
def forward(self, x, use_ema=True):
if self.bdt_flag:
x = x.transpose(1, 2)
quantized_idx, quantized_onehot = self.vq(x)
embed_idx = torch.matmul(quantized_onehot.float(), self.embedding.
weight)
if self.training and self.ema_flag and use_ema:
self.ema_size = self.decay * self.ema_size + (1 - self.decay
) * torch.sum(quantized_onehot.view(-1, self.emb_size), 0)
embed_sum = torch.sum(torch.matmul(x.transpose(1, 2),
quantized_onehot.float()), dim=0)
self.ema_w.data = self.decay * self.ema_w.data + (1 - self.decay
) * embed_sum
n = torch.sum(self.ema_size)
self.ema_size = (self.ema_size + self.eps) / (n + self.emb_size *
self.eps) * n
embed_normalized = self.ema_w / self.ema_size.unsqueeze(0)
self.embedding.weight.data.copy_(embed_normalized.transpose(0, 1))
embed_idx_qx = x + (embed_idx - x).detach()
if self.bdt_flag:
embed_idx_qx = embed_idx_qx.transpose(1, 2)
return embed_idx, embed_idx_qx, quantized_idx
def vq(self, x):
flatten_x = x.reshape(-1, self.emb_dim)
dist = torch.sum(torch.pow(self.embedding.weight, 2), dim=1
) - 2 * torch.matmul(flatten_x, self.embedding.weight.T
) + torch.sum(torch.pow(flatten_x, 2), dim=1, keepdim=True)
quantized_idx = torch.argmin(dist, dim=1).view(x.size(0), x.size(1))
quantized_onehot = F.one_hot(quantized_idx, self.emb_size)
return quantized_idx, quantized_onehot
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'emb_dim': 4, 'emb_size': 4}]
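# Illustrative usage sketch for the module above (not part of the original source): each
# timestep is assigned to its nearest codebook row, and the straight-through output
# equals the selected embedding in value while keeping gradients flowing back to x.
q = Quantize(emb_dim=4, emb_size=4)
x = torch.rand(2, 3, 4, requires_grad=True)                 # [batch, time, emb_dim]
embed_idx, embed_idx_qx, quantized_idx = q(x)
dists = torch.cdist(x.reshape(-1, 4), q.embedding.weight)   # distance to each code
assert torch.equal(quantized_idx.reshape(-1), dists.argmin(dim=1))
assert torch.allclose(embed_idx_qx, embed_idx)              # same values as the codebook entry
assert embed_idx_qx.requires_grad                           # but still differentiable w.r.t. x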
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_out_ptr0 + x2, xmask)
tmp15 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp23 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = 2.0
tmp13 = tmp11 * tmp12
tmp14 = tmp10 - tmp13
tmp16 = tmp15 * tmp15
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp21 = tmp20 * tmp20
tmp22 = tmp19 + tmp21
tmp24 = tmp23 * tmp23
tmp25 = tmp22 + tmp24
tmp26 = tmp14 + tmp25
tl.store(in_out_ptr0 + x2, tmp26, xmask)
@triton.jit
def triton_poi_fused_argmin_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp32 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 < tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tl.full([1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tmp15 = tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp18 = tmp15 < tmp17
tmp19 = tmp15 == tmp17
tmp20 = tmp15 != tmp15
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1], 2, tl.int64)
tmp27 = tmp16 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp23 | tmp28
tmp30 = tl.where(tmp29, tmp15, tmp17)
tmp31 = tl.where(tmp29, tmp16, tmp26)
tmp33 = tmp30 < tmp32
tmp34 = tmp30 == tmp32
tmp35 = tmp30 != tmp30
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1], 3, tl.int64)
tmp42 = tmp31 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tl.where(tmp44, tmp30, tmp32)
tmp46 = tl.where(tmp44, tmp31, tmp41)
tl.store(out_ptr0 + x0, tmp46, xmask)
@triton.jit
def triton_poi_fused__to_copy_arange_eq_view_2(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = x0
tmp2 = tmp0 == tmp1
tmp3 = tmp2.to(tl.int64)
tmp4 = tmp3.to(tl.float32)
tl.store(out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_sub_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp1 - tmp0
tmp3 = tmp0 + tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_mul_pow_sub_sum_0[grid(64)](buf1, primals_2,
primals_1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((16,), (1,), torch.int64)
triton_poi_fused_argmin_1[grid(16)](buf1, buf2, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf3 = buf1
del buf1
triton_poi_fused__to_copy_arange_eq_view_2[grid(64)](buf2, buf3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(buf3, primals_2, out=buf4)
del primals_2
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_sub_3[grid(64)](primals_1, buf4, buf5, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_1
return reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0
), buf5, reinterpret_tensor(buf2, (4, 4), (4, 1), 0
), reinterpret_tensor(buf3, (4, 16), (1, 4), 0)
class QuantizeNew(nn.Module):
def __init__(self, emb_dim, emb_size, decay=0.99, eps=1e-05, ema_flag=
False, bdt_flag=False):
super().__init__()
self.emb_dim = emb_dim
self.emb_size = emb_size
self.ema_flag = ema_flag
self.bdt_flag = bdt_flag
self.embedding = nn.Embedding(emb_size, emb_dim)
self.embedding.weight.data.uniform_(-1.0 / self.emb_size, 1.0 /
self.emb_size)
if self.ema_flag:
self.decay = decay
self.eps = eps
embed = torch.randn(emb_dim, emb_size)
self.register_buffer('ema_size', torch.zeros(emb_size))
self.register_buffer('ema_w', embed.clone())
def vq(self, x):
flatten_x = x.reshape(-1, self.emb_dim)
dist = torch.sum(torch.pow(self.embedding.weight, 2), dim=1
) - 2 * torch.matmul(flatten_x, self.embedding.weight.T
) + torch.sum(torch.pow(flatten_x, 2), dim=1, keepdim=True)
quantized_idx = torch.argmin(dist, dim=1).view(x.size(0), x.size(1))
quantized_onehot = F.one_hot(quantized_idx, self.emb_size)
return quantized_idx, quantized_onehot
def forward(self, input_0):
primals_2 = self.embedding.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0], output[1], output[2]
|
unilight/crank
|
Quantize
| false
| 4,482
|
[
"MIT"
] | 0
|
0dc5d9df17f3186155b1c9583ab604ff218ad9a6
|
https://github.com/unilight/crank/tree/0dc5d9df17f3186155b1c9583ab604ff218ad9a6
|
ConvPlus
|
import torch
import torch.nn as nn
import torch.utils.data
class ConvPlus(nn.Module):
def __init__(self, c1, c2, k=3, s=1, g=1, bias=True):
super(ConvPlus, self).__init__()
self.cv1 = nn.Conv2d(c1, c2, (k, 1), s, (k // 2, 0), groups=g, bias
=bias)
self.cv2 = nn.Conv2d(c1, c2, (1, k), s, (0, k // 2), groups=g, bias
=bias)
def forward(self, x):
return self.cv1(x) + self.cv2(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'c1': 4, 'c2': 4}]
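# Illustrative check for the module above (not part of the original source): the
# (k, 1) + (1, k) pair keeps the spatial size of a same-padded k x k convolution while
# using 2*k instead of k*k weights per input/output channel pair.
m = ConvPlus(c1=4, c2=4, k=3)
x = torch.rand(4, 4, 4, 4)
assert m(x).shape == x.shape
params_plus = sum(p.numel() for p in m.parameters())
params_full = 4 * 4 * 3 * 3 + 4      # weight + bias of an equivalent nn.Conv2d(4, 4, 3, padding=1)
assert params_plus < params_full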
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_add_convolution_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x3, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 1), (12, 3, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1, 3), (12, 3, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = extern_kernels.convolution(primals_3, primals_4, stride=(1,
1), padding=(0, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_convolution_0[grid(256)](buf2, primals_2, buf1,
primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf1
del primals_2
del primals_5
return buf2, primals_1, primals_3, primals_4
class ConvPlusNew(nn.Module):
def __init__(self, c1, c2, k=3, s=1, g=1, bias=True):
super(ConvPlusNew, self).__init__()
self.cv1 = nn.Conv2d(c1, c2, (k, 1), s, (k // 2, 0), groups=g, bias
=bias)
self.cv2 = nn.Conv2d(c1, c2, (1, k), s, (0, k // 2), groups=g, bias
=bias)
def forward(self, input_0):
primals_1 = self.cv1.weight
primals_2 = self.cv1.bias
primals_4 = self.cv2.weight
primals_5 = self.cv2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
verchable/GenderDiversityCalc
|
ConvPlus
| false
| 4,483
|
[
"Apache-2.0"
] | 0
|
eb07fbc9d13e567de4efd8ea2a0aae793a06bf1d
|
https://github.com/verchable/GenderDiversityCalc/tree/eb07fbc9d13e567de4efd8ea2a0aae793a06bf1d
|
Mean
|
import torch
from torchvision.datasets import *
import torch.nn as nn
from torchvision.transforms import *
class Mean(nn.Module):
def __init__(self, dim, keep_dim=False):
super(Mean, self).__init__()
self.dim = dim
self.keep_dim = keep_dim
def forward(self, input):
return input.mean(self.dim, self.keep_dim)
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
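# Illustrative one-line check for the wrapper above (not part of the original source):
# Mean(dim=4) simply reduces the last axis of the 5-D input via torch's built-in mean.
m = Mean(dim=4)
x = torch.rand(4, 4, 4, 4, 4)
assert torch.allclose(m(x), x.mean(4))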
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torchvision.datasets import *
import torch.nn as nn
from torchvision.transforms import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class MeanNew(nn.Module):
def __init__(self, dim, keep_dim=False):
super(MeanNew, self).__init__()
self.dim = dim
self.keep_dim = keep_dim
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
tousifulhaque/DANet
|
Mean
| false
| 4,484
|
[
"MIT"
] | 0
|
1a0c91f0e551a071b5e335b4157313780a8a1b1a
|
https://github.com/tousifulhaque/DANet/tree/1a0c91f0e551a071b5e335b4157313780a8a1b1a
|
cheap_cnn
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class cheap_cnn(nn.Module):
def __init__(self):
super(cheap_cnn, self).__init__()
self.cnn1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3)
self.cnn2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3)
self.cnn3 = nn.Conv2d(in_channels=64, out_channels=32, kernel_size=3)
self.cnn4 = nn.Conv2d(in_channels=32, out_channels=2, kernel_size=3)
self.flatten = nn.Flatten()
def forward(self, x):
x.size(0)
x = F.relu(self.cnn1(x))
x = F.relu(self.cnn2(x))
x = F.relu(self.cnn3(x))
x = F.relu(self.cnn4(x))
return self.flatten(x)
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
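# Illustrative shape trace for the module above (not part of the original source): each
# 3x3 convolution uses the default stride=1 and padding=0, so every layer trims 2 from
# each spatial dimension (64 -> 62 -> 60 -> 58 -> 56) and the flattened output has
# 2 * 56 * 56 = 6272 features, matching the (4, 6272) view in the compiled code below.
net = cheap_cnn()
out = net(torch.rand(4, 3, 64, 64))
assert out.shape == (4, 2 * 56 * 56)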
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 492032
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3844 % 32
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 3600 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 430592
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3364 % 32
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_3(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 25088
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = xindex // 3136 % 2
x0 = xindex % 3136
x3 = xindex // 3136
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x0 + 3200 * x3), tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_2, (32, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_3, (32,), (1,))
assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (32,), (1,))
assert_size_stride(primals_8, (2, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_9, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 62, 62), (123008, 3844, 62, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(492032)](buf1, primals_3,
492032, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 60, 60), (230400, 3600, 60, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(921600)](buf3, primals_5,
921600, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 32, 58, 58), (107648, 3364, 58, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(430592)](buf5, primals_7,
430592, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 2, 56, 56), (6272, 3136, 56, 1))
buf7 = buf6
del buf6
buf8 = empty_strided_cuda((4, 2, 56, 56), (6400, 3200, 56, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_3[grid(25088)](
buf7, primals_9, buf8, 25088, XBLOCK=256, num_warps=4, num_stages=1
)
del primals_9
return (reinterpret_tensor(buf7, (4, 6272), (6272, 1), 0), primals_1,
primals_2, primals_4, primals_6, primals_8, buf1, buf3, buf5, buf8)
class cheap_cnnNew(nn.Module):
def __init__(self):
super(cheap_cnnNew, self).__init__()
self.cnn1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3)
self.cnn2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3)
self.cnn3 = nn.Conv2d(in_channels=64, out_channels=32, kernel_size=3)
self.cnn4 = nn.Conv2d(in_channels=32, out_channels=2, kernel_size=3)
self.flatten = nn.Flatten()
def forward(self, input_0):
primals_2 = self.cnn1.weight
primals_3 = self.cnn1.bias
primals_4 = self.cnn2.weight
primals_5 = self.cnn2.bias
primals_6 = self.cnn3.weight
primals_7 = self.cnn3.bias
primals_8 = self.cnn4.weight
primals_9 = self.cnn4.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
vaibhav117/sim2real4real
|
cheap_cnn
| false
| 4,485
|
[
"MIT"
] | 0
|
b1f253ef359eda0c7e3b594f89c8a35f0cf925bf
|
https://github.com/vaibhav117/sim2real4real/tree/b1f253ef359eda0c7e3b594f89c8a35f0cf925bf
|
ZeroCenter
|
import torch
import torch.nn as nn
class ZeroCenter(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
"""x : [B, C, H, W]"""
return x.sub_(x.flatten(1).mean(1, keepdim=True).unsqueeze(-1).
unsqueeze(-1))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
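# Illustrative property check for the module above (not part of the original source):
# the subtraction uses sub_, so the input is modified in place, and afterwards every
# sample's mean over (C, H, W) is numerically zero.
zc = ZeroCenter()
x = torch.rand(4, 4, 4, 4)
y = zc(x)
assert y.data_ptr() == x.data_ptr()                  # returned tensor is the input itself
assert torch.allclose(x.flatten(1).mean(1), torch.zeros(4), atol=1e-6)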
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_per_fused_mean_sub_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 64.0
tmp6 = tmp4 / tmp5
tmp7 = tmp0 - tmp6
tl.store(out_ptr2 + (r1 + 64 * x0), tmp7, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
get_raw_stream(0)
triton_per_fused_mean_sub_0[grid(4)](arg0_1, arg0_1, 4, 64, XBLOCK=
1, num_warps=2, num_stages=1)
return arg0_1,
class ZeroCenterNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
vinnamkim/segmentation_models.pytorch
|
ZeroCenter
| false
| 4,486
|
[
"MIT"
] | 0
|
f967ded34df6fb536e8e8cba9b6491ae63b939f5
|
https://github.com/vinnamkim/segmentation_models.pytorch/tree/f967ded34df6fb536e8e8cba9b6491ae63b939f5
|
EnsembleDense
|
import math
import torch
from torch import nn
class EnsembleDense(nn.Module):
__constants__ = ['num_ensembles', 'in_features', 'out_features']
in_features: 'int'
out_features: 'int'
weight: 'torch.Tensor'
def __init__(self, num_ensembles: 'int', in_features: 'int',
out_features: 'int', bias: 'bool'=True) ->None:
super(EnsembleDense, self).__init__()
self.num_ensembles = num_ensembles
self.in_features = in_features
self.out_features = out_features
self.weight = nn.Parameter(torch.Tensor(num_ensembles, in_features,
out_features))
if bias:
self.bias = nn.Parameter(torch.Tensor(num_ensembles, 1,
out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self) ->None:
fan = self.in_features
gain = nn.init.calculate_gain('leaky_relu', param=math.sqrt(5))
std = gain / math.sqrt(fan)
bound = math.sqrt(3.0) * std
with torch.no_grad():
nn.init.uniform_(self.weight, -bound, bound)
if self.bias is not None:
fan_in = self.in_features
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(self.bias, -bound, bound)
def forward(self, input: 'torch.Tensor') ->torch.Tensor:
return torch.bmm(input, self.weight) + self.bias
def extra_repr(self) ->str:
return ('num_ensembles={}, in_features={}, out_features={}, bias={}'
.format(self.num_ensembles, self.in_features, self.out_features,
self.bias is not None))
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'num_ensembles': 4, 'in_features': 4, 'out_features': 4}]
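# Illustrative usage sketch for the module above (not part of the original source): the
# bmm applies one independent linear map per ensemble member, so member i of the output
# depends only on member i of the input.
layer = EnsembleDense(num_ensembles=4, in_features=4, out_features=4)
x = torch.rand(4, 8, 4)                              # [num_ensembles, batch, in_features]
out = layer(x)
assert out.shape == (4, 8, 4)
manual0 = x[0] @ layer.weight[0] + layer.bias[0]     # member 0 computed by hand
assert torch.allclose(out[0], manual0, atol=1e-6)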
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 1, 4), (4, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(primals_2, primals_1, out=buf0)
del primals_1
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_0[grid(64)](buf1, primals_3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_3
return buf1, reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0)
class EnsembleDenseNew(nn.Module):
__constants__ = ['num_ensembles', 'in_features', 'out_features']
in_features: 'int'
out_features: 'int'
weight: 'torch.Tensor'
def __init__(self, num_ensembles: 'int', in_features: 'int',
out_features: 'int', bias: 'bool'=True) ->None:
super(EnsembleDenseNew, self).__init__()
self.num_ensembles = num_ensembles
self.in_features = in_features
self.out_features = out_features
self.weight = nn.Parameter(torch.Tensor(num_ensembles, in_features,
out_features))
if bias:
self.bias = nn.Parameter(torch.Tensor(num_ensembles, 1,
out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self) ->None:
fan = self.in_features
gain = nn.init.calculate_gain('leaky_relu', param=math.sqrt(5))
std = gain / math.sqrt(fan)
bound = math.sqrt(3.0) * std
with torch.no_grad():
nn.init.uniform_(self.weight, -bound, bound)
if self.bias is not None:
fan_in = self.in_features
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(self.bias, -bound, bound)
def extra_repr(self) ->str:
return ('num_ensembles={}, in_features={}, out_features={}, bias={}'
.format(self.num_ensembles, self.in_features, self.out_features,
self.bias is not None))
def forward(self, input_0):
primals_1 = self.weight
primals_3 = self.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
vermouth1992/rlutils
|
EnsembleDense
| false
| 4,487
|
[
"Apache-2.0"
] | 0
|
a326373b9e39dbf147c6c4261b82a688d4dc3e78
|
https://github.com/vermouth1992/rlutils/tree/a326373b9e39dbf147c6c4261b82a688d4dc3e78
|
FocalLoss
|
import torch
from torch import nn
from torchvision.datasets.folder import *
class FocalLoss(nn.Module):
def __init__(self, gamma=0, eps=1e-07):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.eps = eps
self.ce = torch.nn.CrossEntropyLoss()
def forward(self, input, target):
logp = self.ce(input, target)
p = torch.exp(-logp)
loss = (1 - p) ** self.gamma * logp
return loss.mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
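# Illustrative sanity check for the module above (not part of the original source): with
# the default gamma=0 the modulating factor (1 - p) ** gamma equals 1, so the focal loss
# reduces to plain cross entropy.
logits = torch.randn(8, 5)
targets = torch.randint(0, 5, (8,))
fl = FocalLoss(gamma=0)
ce = nn.CrossEntropyLoss()
assert torch.allclose(fl(logits, targets), ce(logits, targets), atol=1e-6)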
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
from torchvision.datasets.folder import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_div_exp_mean_mul_neg_pow_rsub_sum_1(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr1 + r3, None)
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp15 = tmp13 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = -tmp18
tmp20 = 0.015625
tmp21 = tmp19 * tmp20
tmp22 = -tmp21
tmp23 = tl_math.exp(tmp22)
tmp24 = 1.0
tmp24 - tmp23
tmp26 = tmp24 * tmp21
tmp27 = tmp26 / tmp24
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp27, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused__log_softmax_div_exp_mean_mul_neg_pow_rsub_sum_1[grid
(1)](buf2, buf0, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del buf0
return buf2,
class FocalLossNew(nn.Module):
def __init__(self, gamma=0, eps=1e-07):
super(FocalLossNew, self).__init__()
self.gamma = gamma
self.eps = eps
self.ce = torch.nn.CrossEntropyLoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
tks1998/Pytorch-Face-recongition-state-of-the-art-Qmul-surveface-
|
FocalLoss
| false
| 4,488
|
[
"MIT"
] | 0
|
e4068db0c53a4c6b8e81127191687662806af8d8
|
https://github.com/tks1998/Pytorch-Face-recongition-state-of-the-art-Qmul-surveface-/tree/e4068db0c53a4c6b8e81127191687662806af8d8
|
Simple_AUG
|
import torch
import torch.nn as nn
from torch import autograd as autograd
import torch.fft
from itertools import product as product
class Simple_AUG(nn.Module):
def __init__(self, in_nc=3, out_nc=3, nf=5):
super(Simple_AUG, self).__init__()
self.c1 = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)
self.c2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.c3 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.c4 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.c5 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.c6 = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)
self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
self.mp = nn.MaxPool2d(2)
def forward(self, x):
fea = self.lrelu(self.c1(x))
fea = self.lrelu(self.c2(fea))
fea = self.mp(fea)
fea = self.lrelu(self.c3(fea))
fea = self.lrelu(self.c4(fea))
fea = self.mp(fea)
fea = self.lrelu(self.c5(fea))
out = self.c6(fea)
return out
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
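# Illustrative shape trace for the module above (not part of the original source): every
# conv is same-padded (kernel 3, stride 1, padding 1) and the two MaxPool2d(2) layers
# each halve the spatial size, so a 64x64 input yields an out_nc x 16 x 16 output.
net = Simple_AUG()
out = net(torch.rand(4, 3, 64, 64))
assert out.shape == (4, 3, 16, 16)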
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from torch import autograd as autograd
import torch.fft
from itertools import product as product
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 5
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 5
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 5120
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), xmask, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 5120
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 256 % 5
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 3072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 256 % 3
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (5, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (5,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (5, 5, 3, 3), (45, 9, 3, 1))
assert_size_stride(primals_5, (5,), (1,))
assert_size_stride(primals_6, (5, 5, 3, 3), (45, 9, 3, 1))
assert_size_stride(primals_7, (5,), (1,))
assert_size_stride(primals_8, (5, 5, 3, 3), (45, 9, 3, 1))
assert_size_stride(primals_9, (5,), (1,))
assert_size_stride(primals_10, (5, 5, 3, 3), (45, 9, 3, 1))
assert_size_stride(primals_11, (5,), (1,))
assert_size_stride(primals_12, (3, 5, 3, 3), (45, 9, 3, 1))
assert_size_stride(primals_13, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 5, 64, 64), (20480, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(81920)](buf1,
primals_2, 81920, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 5, 64, 64), (20480, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_leaky_relu_0[grid(81920)](buf3,
primals_5, 81920, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 5, 32, 32), (5120, 1024, 32, 1),
torch.float32)
buf5 = empty_strided_cuda((4, 5, 32, 32), (5120, 1024, 32, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(20480)](buf3, buf4,
buf5, 20480, XBLOCK=256, num_warps=4, num_stages=1)
buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 5, 32, 32), (5120, 1024, 32, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_leaky_relu_2[grid(20480)](buf7,
primals_7, 20480, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 5, 32, 32), (5120, 1024, 32, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_leaky_relu_2[grid(20480)](buf9,
primals_9, 20480, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf10 = empty_strided_cuda((4, 5, 16, 16), (1280, 256, 16, 1),
torch.float32)
buf11 = empty_strided_cuda((4, 5, 16, 16), (1280, 256, 16, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(5120)](buf9, buf10,
buf11, 5120, XBLOCK=256, num_warps=4, num_stages=1)
buf12 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 5, 16, 16), (1280, 256, 16, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_leaky_relu_4[grid(5120)](buf13,
primals_11, 5120, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 3, 16, 16), (768, 256, 16, 1))
buf15 = buf14
del buf14
triton_poi_fused_convolution_5[grid(3072)](buf15, primals_13, 3072,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_13
return (buf15, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, buf1, buf3, buf4, buf5, buf7, buf9, buf10,
buf11, buf13)
class Simple_AUGNew(nn.Module):
def __init__(self, in_nc=3, out_nc=3, nf=5):
super(Simple_AUGNew, self).__init__()
self.c1 = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)
self.c2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.c3 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.c4 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.c5 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.c6 = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)
self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
self.mp = nn.MaxPool2d(2)
def forward(self, input_0):
primals_1 = self.c1.weight
primals_2 = self.c1.bias
primals_4 = self.c2.weight
primals_5 = self.c2.bias
primals_6 = self.c3.weight
primals_7 = self.c3.bias
primals_8 = self.c4.weight
primals_9 = self.c4.bias
primals_10 = self.c5.weight
primals_11 = self.c5.bias
primals_12 = self.c6.weight
primals_13 = self.c6.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
|
varun-jois/KAIR
|
Simple_AUG
| false
| 4,489
|
[
"MIT"
] | 0
|
90c04671c6eb32a6765edfec94f7db3ba1f53f1e
|
https://github.com/varun-jois/KAIR/tree/90c04671c6eb32a6765edfec94f7db3ba1f53f1e
|
Normalize
|
import torch
import torch.nn as nn
import torch.nn.functional as functional
class Normalize(nn.Module):
def __init__(self, dim: 'int', p: 'int'):
super().__init__()
self.dim = dim
self.p = p
def forward(self, inputs):
outputs = functional.normalize(inputs, dim=self.dim, p=self.p)
return outputs
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4, 'p': 4}]
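# Illustrative property check for the module above (not part of the original source):
# F.normalize divides by the p-norm along `dim` (clamped at 1e-12), so the output has
# unit p-norm along that dimension.
norm = Normalize(dim=-1, p=2)
x = torch.rand(4, 4, 4)
y = norm(x)
assert torch.allclose(y.norm(p=2, dim=-1), torch.ones(4, 4), atol=1e-5)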
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp2 = tmp1 * tmp1
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp5 * tmp5
tmp7 = tmp3 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp7 + tmp10
tmp13 = tmp12 * tmp12
tmp14 = tmp13 * tmp13
tmp15 = tmp11 + tmp14
tmp16 = 0.25
tmp17 = libdevice.pow(tmp15, tmp16)
tmp18 = 1e-12
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tmp20 = tmp0 / tmp19
tl.store(out_ptr0 + x2, tmp20, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(1024)](arg0_1, buf0, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class NormalizeNew(nn.Module):
def __init__(self, dim: 'int', p: 'int'):
super().__init__()
self.dim = dim
self.p = p
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
uripatish/torchup
|
Normalize
| false
| 4,490
|
[
"MIT"
] | 0
|
0b7bee031fc99e536342331ba567c523a790d742
|
https://github.com/uripatish/torchup/tree/0b7bee031fc99e536342331ba567c523a790d742
|
ProteinBertPooler
|
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class ProteinBertPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.trainable_encoder = config.trainable_encoder
if self.trainable_encoder:
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
else:
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
if self.trainable_encoder:
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
else:
mean_token_tensor = hidden_states.mean(dim=1)
pooled_output = self.dense(mean_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(trainable_encoder=False,
hidden_size=4)}]
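# Illustrative usage sketch for the module above (not part of the original source; it
# reuses the same _mock_config helper as get_init_inputs): with trainable_encoder=False
# the pooler mean-pools over the sequence dimension before the dense + tanh.
cfg = _mock_config(trainable_encoder=False, hidden_size=4)
pooler = ProteinBertPooler(cfg)
h = torch.rand(2, 5, 4)                              # [batch, seq_len, hidden]
out = pooler(h)
manual = torch.tanh(pooler.dense(h.mean(dim=1)))
assert torch.allclose(out, manual, atol=1e-6)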
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
triton_poi_fused_tanh_1[grid(64)](buf2, primals_3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_3
return buf2, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf2
class ProteinBertPoolerNew(nn.Module):
def __init__(self, config):
super().__init__()
self.trainable_encoder = config.trainable_encoder
if self.trainable_encoder:
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
else:
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, input_0):
primals_2 = self.dense.weight
primals_3 = self.dense.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
StephanHeijl/tape
|
ProteinBertPooler
| false
| 4,491
|
[
"BSD-3-Clause"
] | 0
|
ec631ca53217686605477cf31af4fb8846ff660f
|
https://github.com/StephanHeijl/tape/tree/ec631ca53217686605477cf31af4fb8846ff660f
|
Q
|
import torch
import torch.nn.functional as F
import torch.nn as nn
class Q(nn.Module):
def __init__(self, state_dim, action_dim, hidden):
super(Q, self).__init__()
self.fc1 = nn.Linear(state_dim + action_dim, hidden)
self.fc2 = nn.Linear(hidden, hidden)
self.fc3 = nn.Linear(hidden, 1)
self.state_dim = state_dim
self.action_dim = action_dim
def forward(self, s, a):
s = s.reshape(-1, self.state_dim)
a = a.reshape(-1, self.action_dim)
x = torch.cat((s, a), -1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'action_dim': 4, 'hidden': 4}]
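# Illustrative usage sketch (not from the original repo): the critic flattens the
# state/action batches to 2-D before concatenation, so the 4x4x4x4 inputs from
# get_inputs() yield 64 Q-values of size 1.
def _example_q_usage():
    critic = Q(state_dim=4, action_dim=4, hidden=4)
    s, a = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    q_values = critic(s, a)
    assert q_values.shape == (64, 1)
    return q_values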
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (1, 4), (4, 1))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8
), 0), out=buf1)
del primals_3
buf2 = buf1
del buf1
triton_poi_fused_relu_1[grid(256)](buf2, primals_4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4
), 0), out=buf3)
buf4 = buf3
del buf3
triton_poi_fused_relu_1[grid(256)](buf4, primals_6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_6
buf6 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7,
(4, 1), (1, 4), 0), alpha=1, beta=1, out=buf6)
del primals_8
return buf6, buf0, buf2, buf4, primals_7, primals_5
class QNew(nn.Module):
def __init__(self, state_dim, action_dim, hidden):
super(QNew, self).__init__()
self.fc1 = nn.Linear(state_dim + action_dim, hidden)
self.fc2 = nn.Linear(hidden, hidden)
self.fc3 = nn.Linear(hidden, 1)
self.state_dim = state_dim
self.action_dim = action_dim
def forward(self, input_0, input_1):
primals_3 = self.fc1.weight
primals_4 = self.fc1.bias
primals_5 = self.fc2.weight
primals_6 = self.fc2.bias
primals_7 = self.fc3.weight
primals_8 = self.fc3.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
|
victorkich/agaragan
|
Q
| false
| 4,492
|
[
"MIT"
] | 0
|
64e312fc4fa42f5952f3ce997bafe674306a9419
|
https://github.com/victorkich/agaragan/tree/64e312fc4fa42f5952f3ce997bafe674306a9419
|
ActorSAC
|
import torch
import torch.nn.functional as F
import torch.nn as nn
class ActorSAC(nn.Module):
def __init__(self, state_dim, hidden, min_log_std=-20, max_log_std=2):
super(ActorSAC, self).__init__()
self.fc1 = nn.Linear(state_dim, hidden)
self.fc2 = nn.Linear(hidden, hidden)
self.mu_head = nn.Linear(hidden, 1)
self.log_std_head = nn.Linear(hidden, 1)
self.min_log_std = min_log_std
self.max_log_std = max_log_std
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
mu = self.mu_head(x)
log_std_head = F.relu(self.log_std_head(x))
log_std_head = torch.clamp(log_std_head, self.min_log_std, self.
max_log_std)
return mu, log_std_head
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'hidden': 4}]
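# Illustrative usage sketch (not from the original repo): the actor returns a mean and a
# log-std head clamped to [-20, 2]; wrapping them in a Normal distribution is a typical
# (assumed) next step in SAC-style training, not something the class itself does.
from torch.distributions import Normal
def _example_actor_usage():
    actor = ActorSAC(state_dim=4, hidden=4)
    mu, log_std = actor(torch.rand(4, 4, 4, 4))
    assert mu.shape == log_std.shape == torch.Size([4, 4, 4, 1])
    dist = Normal(mu, log_std.exp())
    return dist.rsample()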
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_clamp_ge_le_logical_and_relu_threshold_backward_1(in_ptr0,
in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = -20.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = 2.0
tmp9 = triton_helpers.minimum(tmp7, tmp8)
tmp10 = tmp5 >= tmp6
tmp11 = tmp5 <= tmp8
tmp12 = tmp10 & tmp11
tmp13 = 0.0
tmp14 = tmp5 <= tmp13
tl.store(out_ptr0 + x0, tmp9, xmask)
tl.store(out_ptr1 + x0, tmp12, xmask)
tl.store(out_ptr2 + x0, tmp14, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1,), (1,))
assert_size_stride(primals_8, (1, 4), (4, 1))
assert_size_stride(primals_9, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf11, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
primals_5, buf10, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf5)
del primals_7
buf6 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 1), (1, 4), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.bool)
buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.bool)
triton_poi_fused_clamp_ge_le_logical_and_relu_threshold_backward_1[grid
(64)](buf6, primals_9, buf7, buf8, buf9, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf6
del primals_9
return reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0
), buf7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (64, 4), (4, 1), 0
), buf8, buf9, primals_8, primals_6, buf10, primals_4, buf11
class ActorSACNew(nn.Module):
def __init__(self, state_dim, hidden, min_log_std=-20, max_log_std=2):
super(ActorSACNew, self).__init__()
self.fc1 = nn.Linear(state_dim, hidden)
self.fc2 = nn.Linear(hidden, hidden)
self.mu_head = nn.Linear(hidden, 1)
self.log_std_head = nn.Linear(hidden, 1)
self.min_log_std = min_log_std
self.max_log_std = max_log_std
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.mu_head.weight
primals_7 = self.mu_head.bias
primals_8 = self.log_std_head.weight
primals_9 = self.log_std_head.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1]
|
victorkich/agaragan
|
ActorSAC
| false
| 4,493
|
[
"MIT"
] | 0
|
64e312fc4fa42f5952f3ce997bafe674306a9419
|
https://github.com/victorkich/agaragan/tree/64e312fc4fa42f5952f3ce997bafe674306a9419
|
PAM_Module
|
from torch.nn import Module
import torch
import torch.serialization
import torch
import torch.utils.data
from torch.nn import Conv2d
from torch.nn import Parameter
from torch.nn import Softmax
class PAM_Module(Module):
""" Position attention module"""
def __init__(self, in_dim):
super(PAM_Module, self).__init__()
self.chanel_in = in_dim
self.query_conv = Conv2d(in_channels=in_dim, out_channels=in_dim,
kernel_size=1)
self.key_conv = Conv2d(in_channels=in_dim, out_channels=in_dim,
kernel_size=1)
self.value_conv = Conv2d(in_channels=in_dim, out_channels=in_dim,
kernel_size=1)
self.gamma = Parameter(torch.zeros(1))
self.softmax = Softmax(dim=-1)
def forward(self, x):
"""
inputs :
x : input feature maps( B X C X H X W)
returns :
out : attention value + input feature
attention: B X (HxW) X (HxW)
"""
m_batchsize, C, height, width = x.size()
proj_query = self.query_conv(x).view(m_batchsize, -1, width * height
).permute(0, 2, 1)
proj_key = self.key_conv(x).view(m_batchsize, -1, width * height)
energy = torch.bmm(proj_query, proj_key)
attention = self.softmax(energy)
proj_value = self.value_conv(x).view(m_batchsize, -1, width * height)
out = torch.bmm(proj_value, attention.permute(0, 2, 1))
out = out.view(m_batchsize, C, height, width)
out = self.gamma * out + x
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4}]
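# Illustrative usage sketch (not from the original repo): spatial self-attention over the
# H*W positions of a B x C x H x W feature map. Because gamma is initialised to zero,
# the output equals the input feature map until gamma is trained away from zero.
def _example_pam_usage():
    pam = PAM_Module(in_dim=4)
    feats = torch.rand(4, 4, 4, 4)
    out = pam(feats)
    assert out.shape == feats.shape
    assert torch.allclose(out, feats)    # holds only while gamma is still zero
    return out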
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn import Module
import torch.serialization
import torch
import torch.utils.data
from torch.nn import Conv2d
from torch.nn import Parameter
from torch.nn import Softmax
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask)
@triton.jit
def triton_poi_fused_add_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask)
tmp3 = tmp1 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(256)](buf1, primals_3, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(primals_1, primals_4, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_0[grid(256)](buf3, primals_5, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 16, 4), (64, 1, 16),
0), reinterpret_tensor(buf3, (4, 4, 16), (64, 16, 1), 0), out=buf4)
buf7 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
triton_per_fused__softmax_1[grid(64)](buf4, buf7, 64, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del buf4
buf8 = extern_kernels.convolution(primals_1, primals_6, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_0[grid(256)](buf9, primals_7, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf9, (4, 4, 16), (64, 16, 1),
0), reinterpret_tensor(buf7, (4, 16, 16), (256, 1, 16), 0), out
=buf10)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_2[grid(256)](primals_8, buf10, primals_1,
buf11, 256, XBLOCK=256, num_warps=4, num_stages=1)
return (buf11, primals_1, primals_2, primals_4, primals_6, primals_8,
buf7, buf10, reinterpret_tensor(buf9, (4, 16, 4), (64, 1, 16), 0),
reinterpret_tensor(buf1, (4, 4, 16), (64, 16, 1), 0),
reinterpret_tensor(buf3, (4, 16, 4), (64, 1, 16), 0))
class PAM_ModuleNew(Module):
""" Position attention module"""
def __init__(self, in_dim):
super(PAM_ModuleNew, self).__init__()
self.chanel_in = in_dim
self.query_conv = Conv2d(in_channels=in_dim, out_channels=in_dim,
kernel_size=1)
self.key_conv = Conv2d(in_channels=in_dim, out_channels=in_dim,
kernel_size=1)
self.value_conv = Conv2d(in_channels=in_dim, out_channels=in_dim,
kernel_size=1)
self.gamma = Parameter(torch.zeros(1))
self.softmax = Softmax(dim=-1)
def forward(self, input_0):
primals_8 = self.gamma
primals_2 = self.query_conv.weight
primals_3 = self.query_conv.bias
primals_4 = self.key_conv.weight
primals_5 = self.key_conv.bias
primals_6 = self.value_conv.weight
primals_7 = self.value_conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
|
vis-opt-group/GTANet
|
PAM_Module
| false
| 4,494
|
[
"MIT"
] | 0
|
269ff4418ee5f0267987e1fa4c69bda13e5cb00d
|
https://github.com/vis-opt-group/GTANet/tree/269ff4418ee5f0267987e1fa4c69bda13e5cb00d
|
SE_layer_3d
|
import torch
import torch.nn as nn
import torch.multiprocessing
class SE_layer_3d(nn.Module):
def __init__(self, num_channels, reduction_ratio=2):
super(SE_layer_3d, self).__init__()
num_channels_reduced = num_channels // reduction_ratio
self.reduction_ratio = reduction_ratio
self.fc1 = nn.Linear(num_channels, num_channels_reduced, bias=True)
self.relu = nn.ReLU()
self.fc2 = nn.Linear(num_channels_reduced, num_channels, bias=True)
self.sigmoid = nn.Sigmoid()
self.globalAvgPool = nn.AdaptiveAvgPool3d(1)
def forward(self, input_tensor):
b, c, _d, _w, _h = input_tensor.size()
squeeze_tensor = self.globalAvgPool(input_tensor).view(b, c).float()
fc_out_1 = self.relu(self.fc1(squeeze_tensor))
fc_out_2 = self.sigmoid(self.fc2(fc_out_1))
output_tensor = torch.mul(input_tensor, fc_out_2.view(b, c, 1, 1, 1))
return output_tensor
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_channels': 4}]
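# Illustrative usage sketch (not from the original repo): squeeze-and-excitation over the
# channel dimension of a B x C x D x H x W volume; the output keeps the input shape and
# each channel is rescaled by a sigmoid gate in (0, 1).
def _example_se_usage():
    se = SE_layer_3d(num_channels=4, reduction_ratio=2)
    vol = torch.rand(4, 4, 4, 4, 4)
    out = se(vol)
    assert out.shape == vol.shape
    return out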
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.multiprocessing
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 64.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(primals_2, (2, 4), (4, 1))
assert_size_stride(primals_3, (2,), (1,))
assert_size_stride(primals_4, (4, 2), (2, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1, 1), (4, 1, 16, 16, 16),
torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 64, XBLOCK=8,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (4, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 2), (1, 4), 0), out=buf2)
del primals_2
buf3 = buf2
del buf2
triton_poi_fused_relu_1[grid(8)](buf3, primals_3, 8, XBLOCK=8,
num_warps=1, num_stages=1)
del primals_3
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, buf3, reinterpret_tensor(primals_4,
(2, 4), (1, 2), 0), alpha=1, beta=1, out=buf4)
del primals_5
buf5 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_mul_2[grid(1024)](primals_1, buf4, buf5, 1024,
XBLOCK=256, num_warps=4, num_stages=1)
return buf5, primals_1, reinterpret_tensor(buf1, (4, 4), (4, 1), 0
), buf3, buf4, primals_4
class SE_layer_3dNew(nn.Module):
def __init__(self, num_channels, reduction_ratio=2):
super(SE_layer_3dNew, self).__init__()
num_channels_reduced = num_channels // reduction_ratio
self.reduction_ratio = reduction_ratio
self.fc1 = nn.Linear(num_channels, num_channels_reduced, bias=True)
self.relu = nn.ReLU()
self.fc2 = nn.Linear(num_channels_reduced, num_channels, bias=True)
self.sigmoid = nn.Sigmoid()
self.globalAvgPool = nn.AdaptiveAvgPool3d(1)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
vinbigdata-medical/abdomen-phases
|
SE_layer_3d
| false
| 4,495
|
[
"MIT"
] | 0
|
4adf5b8bf13aec85247d74e3cd3789c52cb88b92
|
https://github.com/vinbigdata-medical/abdomen-phases/tree/4adf5b8bf13aec85247d74e3cd3789c52cb88b92
|
Mean
|
import torch
from torch import nn
class Mean(nn.Module):
def __init__(self, *args):
super(Mean, self).__init__()
self.shape = args
def forward(self, x):
return x.mean(self.shape)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
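# Illustrative usage sketch (not from the original repo): reducing over explicit dims,
# e.g. global average pooling over the two spatial dimensions of a feature map.
def _example_mean_usage():
    gap = Mean(2, 3)
    x = torch.rand(4, 4, 8, 8)
    assert gap(x).shape == (4, 4)
    return gap(x)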
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0))
tmp4 = 256.0
tmp5 = tmp3 / tmp4
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp5, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(1)](buf1, arg0_1, 1, 256, num_warps=2,
num_stages=1)
del arg0_1
return buf1,
class MeanNew(nn.Module):
def __init__(self, *args):
super(MeanNew, self).__init__()
self.shape = args
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
vitskvara/shape-guided-anomaly-detection
|
Mean
| false
| 4,496
|
[
"MIT"
] | 0
|
6685b2e0b97968a6d0f478d2920486da107b277f
|
https://github.com/vitskvara/shape-guided-anomaly-detection/tree/6685b2e0b97968a6d0f478d2920486da107b277f
|
HighwayLayer
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.onnx.operators
class HighwayLayer(nn.Module):
def __init__(self, input_dim, transform_activation=F.relu,
gate_activation=F.softmax, gate_bias=-2):
super().__init__()
self.highway_transform_activation = transform_activation
self.highway_gate_activation = gate_activation
self.highway_transform = nn.Linear(input_dim, input_dim)
self.highway_gate = nn.Linear(input_dim, input_dim)
self.highway_gate.bias.data.fill_(gate_bias)
def forward(self, x):
transform_output = self.highway_transform_activation(self.
highway_transform(x))
gate_output = self.highway_gate_activation(self.highway_gate(x))
transformation_part = torch.mul(transform_output, gate_output)
carry_part = torch.mul(1 - gate_output, x)
return torch.add(transformation_part, carry_part)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
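# Illustrative usage sketch (not from the original repo): the layer computes
# T(x) * g(x) + (1 - g(x)) * x and preserves the input shape; note that the gate
# activation defaults to softmax here rather than the more common sigmoid.
def _example_highway_usage():
    hw = HighwayLayer(input_dim=4)
    x = torch.rand(4, 4, 4, 4)
    y = hw(x)
    assert y.shape == x.shape
    return y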
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
import torch.onnx.operators
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_add_mul_relu_rsub_1(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr1 + x3, xmask)
tmp15 = tl.load(in_ptr2 + x3, xmask)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp10 = tl.full([1], 0, tl.int32)
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp12 = tmp11 * tmp8
tmp13 = 1.0
tmp14 = tmp13 - tmp8
tmp16 = tmp14 * tmp15
tmp17 = tmp12 + tmp16
tl.store(in_out_ptr0 + x3, tmp17, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = buf3
del buf3
triton_poi_fused__softmax_add_mul_relu_rsub_1[grid(256)](buf4, buf2,
buf0, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf2
return buf4, primals_3, buf0, buf1
class HighwayLayerNew(nn.Module):
def __init__(self, input_dim, transform_activation=F.relu,
gate_activation=F.softmax, gate_bias=-2):
super().__init__()
self.highway_transform_activation = transform_activation
self.highway_gate_activation = gate_activation
self.highway_transform = nn.Linear(input_dim, input_dim)
self.highway_gate = nn.Linear(input_dim, input_dim)
self.highway_gate.bias.data.fill_(gate_bias)
def forward(self, input_0):
primals_1 = self.highway_transform.weight
primals_2 = self.highway_transform.bias
primals_4 = self.highway_gate.weight
primals_5 = self.highway_gate.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
vincentLiangBerkeley/translate
|
HighwayLayer
| false
| 4,497
|
[
"BSD-3-Clause"
] | 0
|
734ae1ad9dfb778935e4825b5ce2687e2df559ea
|
https://github.com/vincentLiangBerkeley/translate/tree/734ae1ad9dfb778935e4825b5ce2687e2df559ea
|
Patch2Image
|
import torch
from torch import nn
class Patch2Image(nn.Module):
""" take in patch and copy n_up times to form the full image"""
def __init__(self, patch_sz, n_up):
super(Patch2Image, self).__init__()
self.patch_sz = patch_sz
self.n_up = n_up
def forward(self, x):
        assert x.shape[-1] == self.patch_sz, f'input patch size ({x.shape[-1]}) != self.patch_sz ({self.patch_sz})'
x = torch.cat([x] * self.n_up, -1)
x = torch.cat([x] * self.n_up, -2)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'patch_sz': 4, 'n_up': 4}]
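# Illustrative usage sketch (not from the original repo): a 4x4 patch tiled n_up=4 times
# along width and height becomes a 16x16 "image" with the same batch/channel dims.
def _example_patch2image_usage():
    tiler = Patch2Image(patch_sz=4, n_up=4)
    patch = torch.rand(4, 4, 4, 4)
    full = tiler(patch)
    assert full.shape == (4, 4, 16, 16)
    return full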
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16 % 4
x3 = xindex // 256
x4 = xindex
tmp0 = tl.load(in_ptr0 + (4 * x1 + 16 * x3 + x0 % 4), None,
eviction_policy='evict_last')
tl.store(out_ptr0 + x4, tmp0, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 16), (1024, 256, 64, 16, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(4096)](arg0_1, buf0, 4096, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 4, 16, 16), (1024, 256, 16, 1), 0),
class Patch2ImageNew(nn.Module):
""" take in patch and copy n_up times to form the full image"""
def __init__(self, patch_sz, n_up):
super(Patch2ImageNew, self).__init__()
self.patch_sz = patch_sz
self.n_up = n_up
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
vitskvara/shape-guided-anomaly-detection
|
Patch2Image
| false
| 4,498
|
[
"MIT"
] | 0
|
6685b2e0b97968a6d0f478d2920486da107b277f
|
https://github.com/vitskvara/shape-guided-anomaly-detection/tree/6685b2e0b97968a6d0f478d2920486da107b277f
|
Feature
|
import torch
import torch.serialization
import torch
import torch.utils.data
class ResBlock(torch.nn.Module):
def __init__(self):
super(ResBlock, self).__init__()
self.conv1 = torch.nn.Conv2d(in_channels=64, out_channels=64,
kernel_size=3, stride=1, padding=1)
self.conv2 = torch.nn.Conv2d(in_channels=64, out_channels=64,
kernel_size=3, stride=1, padding=1)
def forward(self, frames):
"""
Args:
frames: 1x64xHxW
Returns: 1x64xHxW
"""
res = self.conv1(frames)
res = torch.nn.functional.relu(res)
res = self.conv2(res)
return frames + res
class Feature(torch.nn.Module):
def __init__(self):
super(Feature, self).__init__()
self.preconv = torch.nn.Conv2d(in_channels=3, out_channels=64,
kernel_size=3, stride=1, padding=1)
self.resblock_1 = ResBlock()
self.resblock_2 = ResBlock()
self.resblock_3 = ResBlock()
self.resblock_4 = ResBlock()
self.resblock_5 = ResBlock()
self.conv1x1 = torch.nn.Conv2d(in_channels=64, out_channels=3,
kernel_size=1)
def forward(self, frame):
"""
Args:
frame: 1x3xHxW
Returns: 1x3xHxW
"""
x = self.preconv(frame)
x = self.resblock_1(x)
x = self.resblock_2(x)
x = self.resblock_3(x)
x = self.resblock_4(x)
x = self.resblock_5(x)
x = self.conv1x1(x)
return frame - x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
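# Illustrative usage sketch (not from the original repo): the network predicts a residual
# and subtracts it from the input frame, so the output keeps the B x 3 x H x W shape.
def _example_feature_usage():
    net = Feature()
    frame = torch.rand(1, 3, 64, 64)
    out = net(frame)
    assert out.shape == frame.shape
    return out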
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.serialization
import torch
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_out_ptr0 + x3, None)
tmp2 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_sub_3(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 3
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_out_ptr0 + x3, None)
tmp2 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 - tmp3
tl.store(in_out_ptr0 + x3, tmp4, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25) = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (64,), (1,))
assert_size_stride(primals_12, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_13, (64,), (1,))
assert_size_stride(primals_14, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_15, (64,), (1,))
assert_size_stride(primals_16, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_17, (64,), (1,))
assert_size_stride(primals_18, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_19, (64,), (1,))
assert_size_stride(primals_20, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_21, (64,), (1,))
assert_size_stride(primals_22, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_23, (64,), (1,))
assert_size_stride(primals_24, (3, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_25, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(1048576)](buf1, primals_2,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(1048576)](buf3, primals_5,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf5 = buf4
del buf4
triton_poi_fused_add_convolution_2[grid(1048576)](buf5, buf1,
primals_7, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_1[grid(1048576)](buf7, primals_9,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf9 = buf8
del buf8
triton_poi_fused_add_convolution_2[grid(1048576)](buf9, buf5,
primals_11, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_11
buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf11 = buf10
del buf10
triton_poi_fused_convolution_relu_1[grid(1048576)](buf11,
primals_13, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_13
buf12 = extern_kernels.convolution(buf11, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf13 = buf12
del buf12
triton_poi_fused_add_convolution_2[grid(1048576)](buf13, buf9,
primals_15, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_15
buf14 = extern_kernels.convolution(buf13, primals_16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf15 = buf14
del buf14
triton_poi_fused_convolution_relu_1[grid(1048576)](buf15,
primals_17, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_17
buf16 = extern_kernels.convolution(buf15, primals_18, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf17 = buf16
del buf16
triton_poi_fused_add_convolution_2[grid(1048576)](buf17, buf13,
primals_19, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_19
buf18 = extern_kernels.convolution(buf17, primals_20, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf19 = buf18
del buf18
triton_poi_fused_convolution_relu_1[grid(1048576)](buf19,
primals_21, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_21
buf20 = extern_kernels.convolution(buf19, primals_22, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf21 = buf20
del buf20
triton_poi_fused_add_convolution_2[grid(1048576)](buf21, buf17,
primals_23, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_23
buf22 = extern_kernels.convolution(buf21, primals_24, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf23 = buf22
del buf22
triton_poi_fused_convolution_sub_3[grid(49152)](buf23, primals_3,
primals_25, 49152, XBLOCK=512, num_warps=4, num_stages=1)
del primals_25
return (buf23, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, primals_16, primals_18,
primals_20, primals_22, primals_24, buf1, buf3, buf5, buf7, buf9,
buf11, buf13, buf15, buf17, buf19, buf21)
class ResBlock(torch.nn.Module):
def __init__(self):
super(ResBlock, self).__init__()
self.conv1 = torch.nn.Conv2d(in_channels=64, out_channels=64,
kernel_size=3, stride=1, padding=1)
self.conv2 = torch.nn.Conv2d(in_channels=64, out_channels=64,
kernel_size=3, stride=1, padding=1)
def forward(self, frames):
"""
Args:
frames: 1x64xHxW
Returns: 1x64xHxW
"""
res = self.conv1(frames)
res = torch.nn.functional.relu(res)
res = self.conv2(res)
return frames + res
class FeatureNew(torch.nn.Module):
def __init__(self):
super(FeatureNew, self).__init__()
self.preconv = torch.nn.Conv2d(in_channels=3, out_channels=64,
kernel_size=3, stride=1, padding=1)
self.resblock_1 = ResBlock()
self.resblock_2 = ResBlock()
self.resblock_3 = ResBlock()
self.resblock_4 = ResBlock()
self.resblock_5 = ResBlock()
self.conv1x1 = torch.nn.Conv2d(in_channels=64, out_channels=3,
kernel_size=1)
def forward(self, input_0):
primals_1 = self.preconv.weight
primals_2 = self.preconv.bias
primals_4 = self.resblock_1.conv1.weight
primals_5 = self.resblock_1.conv1.bias
primals_6 = self.resblock_1.conv2.weight
primals_7 = self.resblock_1.conv2.bias
primals_8 = self.resblock_2.conv1.weight
primals_9 = self.resblock_2.conv1.bias
primals_10 = self.resblock_2.conv2.weight
primals_11 = self.resblock_2.conv2.bias
primals_12 = self.resblock_3.conv1.weight
primals_13 = self.resblock_3.conv1.bias
primals_14 = self.resblock_3.conv2.weight
primals_15 = self.resblock_3.conv2.bias
primals_16 = self.resblock_4.conv1.weight
primals_17 = self.resblock_4.conv1.bias
primals_18 = self.resblock_4.conv2.weight
primals_19 = self.resblock_4.conv2.bias
primals_20 = self.resblock_5.conv1.weight
primals_21 = self.resblock_5.conv1.bias
primals_22 = self.resblock_5.conv2.weight
primals_23 = self.resblock_5.conv2.bias
primals_24 = self.conv1x1.weight
primals_25 = self.conv1x1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25])
return output[0]
|
vis-opt-group/GTANet
|
Feature
| false
| 4,499
|
[
"MIT"
] | 0
|
269ff4418ee5f0267987e1fa4c69bda13e5cb00d
|
https://github.com/vis-opt-group/GTANet/tree/269ff4418ee5f0267987e1fa4c69bda13e5cb00d
|
DuRB_p
|
import torch
import numpy as np
import torch.serialization
import torch
import torch.nn as nn
import torch.utils.data
class ConvLayer(nn.Module):
def __init__(self, in_dim, out_dim, kernel_size, stride, dilation=1):
super(ConvLayer, self).__init__()
self.dilation = dilation
if dilation == 1:
reflect_padding = int(np.floor(kernel_size / 2))
self.reflection_pad = nn.ReflectionPad2d(reflect_padding)
self.conv2d = nn.Conv2d(in_dim, out_dim, kernel_size, stride,
dilation=dilation)
else:
self.conv2d = nn.Conv2d(in_dim, out_dim, kernel_size, stride,
dilation=dilation, padding=dilation)
def forward(self, x):
if self.dilation == 1:
out = self.reflection_pad(x)
out = self.conv2d(out)
else:
out = self.conv2d(x)
return out
class DuRB_p(nn.Module):
def __init__(self, in_dim=32, out_dim=32, res_dim=32, k1_size=3,
k2_size=1, dilation=1, norm_type='batch_norm', with_relu=True):
super(DuRB_p, self).__init__()
self.conv1 = ConvLayer(in_dim, in_dim, 3, 1)
self.conv2 = ConvLayer(in_dim, in_dim, 3, 1)
self.up_conv = ConvLayer(in_dim, res_dim, kernel_size=k1_size,
stride=1, dilation=dilation)
self.down_conv = ConvLayer(res_dim, out_dim, kernel_size=k2_size,
stride=1)
self.with_relu = with_relu
self.relu = nn.ReLU()
def forward(self, x, res):
x_r = x
x = self.relu(self.conv1(x))
x = self.conv2(x)
x += x_r
x = self.relu(x)
x = self.up_conv(x)
x += res
x = self.relu(x)
res = x
x = self.down_conv(x)
x += x_r
if self.with_relu:
x = self.relu(x)
else:
pass
return x, res
def get_inputs():
return [torch.rand([4, 32, 4, 4]), torch.rand([4, 32, 4, 4])]
def get_init_inputs():
return [[], {}]
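# Illustrative usage sketch (not from the original repo): with the default 32-channel
# configuration both the main path and the residual path keep their input shapes.
def _example_durb_usage():
    block = DuRB_p()
    x = torch.rand(4, 32, 4, 4)
    res = torch.rand(4, 32, 4, 4)
    x_out, res_out = block(x, res)
    assert x_out.shape == x.shape and res_out.shape == res.shape
    return x_out, res_out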
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
import torch.serialization
import torch
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 4608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_1(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x4 = xindex // 36
x2 = xindex // 36 % 32
x5 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x4),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x5, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_convolution_reflection_pad2d_relu_2(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x4 = xindex // 36
x2 = xindex // 36 % 32
x5 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x4),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x4),
xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + x5, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_3(in_out_ptr0,
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = 0.0
tmp8 = tmp6 <= tmp7
tl.store(in_out_ptr0 + x3, tmp6, None)
tl.store(out_ptr0 + x3, tmp8, None)
@triton.jit
def triton_poi_fused_reflection_pad2d_4(out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_reflection_pad2d_5(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + x0) + -4 * tl_math
.abs(-3 + x1) + 16 * x2), None)
tl.store(out_ptr0 + x3, tmp0, None)
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_6(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, None)
tmp9 = tl.load(in_ptr2 + x3, None)
tmp10 = tl.load(in_ptr3 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = 0.0
tmp8 = tmp6 <= tmp7
tmp11 = tmp9 + tmp10
tmp12 = tmp11 + tmp3
tmp13 = triton_helpers.maximum(tmp5, tmp12)
tmp14 = tmp13 <= tmp7
tl.store(in_out_ptr0 + x3, tmp6, None)
tl.store(out_ptr0 + x3, tmp8, None)
tl.store(out_ptr1 + x3, tmp14, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_7(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 32
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 32, 4, 4), (512, 16, 4, 1))
assert_size_stride(primals_2, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_3, (32,), (1,))
assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (32,), (1,))
assert_size_stride(primals_8, (4, 32, 4, 4), (512, 16, 4, 1))
assert_size_stride(primals_9, (32, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_10, (32,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 32, 6, 6), (1152, 36, 6, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(4608)](primals_1, buf0,
4608, XBLOCK=256, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 32, 4, 4), (512, 16, 4, 1))
buf2 = empty_strided_cuda((4, 32, 6, 6), (1152, 36, 6, 1), torch.
float32)
triton_poi_fused_convolution_reflection_pad2d_relu_1[grid(4608)](buf1,
primals_3, buf2, 4608, XBLOCK=256, num_warps=4, num_stages=1)
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 32, 4, 4), (512, 16, 4, 1))
buf4 = empty_strided_cuda((4, 32, 6, 6), (1152, 36, 6, 1), torch.
float32)
triton_poi_fused_add_convolution_reflection_pad2d_relu_2[grid(4608)](
buf3, primals_5, primals_1, buf4, 4608, XBLOCK=256, num_warps=4,
num_stages=1)
buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 32, 4, 4), (512, 16, 4, 1))
buf6 = buf5
del buf5
buf12 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
triton_poi_fused_add_convolution_relu_threshold_backward_3[grid(2048)](
buf6, primals_7, primals_8, buf12, 2048, XBLOCK=256, num_warps=
4, num_stages=1)
del primals_7
del primals_8
buf7 = empty_strided_cuda((4,), (1,), torch.int64)
triton_poi_fused_reflection_pad2d_4[grid(4)](buf7, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf8 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.float32
)
triton_poi_fused_reflection_pad2d_5[grid(2048)](buf6, buf8, 2048,
XBLOCK=256, num_warps=4, num_stages=1)
buf9 = extern_kernels.convolution(buf8, primals_9, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 32, 4, 4), (512, 16, 4, 1))
buf10 = buf9
del buf9
buf11 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
buf13 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
triton_poi_fused_add_convolution_relu_threshold_backward_6[grid(2048)](
buf10, primals_10, primals_1, buf3, primals_5, buf11, buf13,
2048, XBLOCK=256, num_warps=4, num_stages=1)
del buf3
del primals_1
del primals_10
del primals_5
buf14 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_7[grid(2048)](buf1
, primals_3, buf14, 2048, XBLOCK=256, num_warps=4, num_stages=1)
del buf1
del primals_3
return (buf10, buf6, primals_2, primals_4, primals_6, primals_9, buf0,
buf2, buf4, buf7, buf8, buf11, buf12, buf13, buf14)
class ConvLayer(nn.Module):
def __init__(self, in_dim, out_dim, kernel_size, stride, dilation=1):
super(ConvLayer, self).__init__()
self.dilation = dilation
if dilation == 1:
reflect_padding = int(np.floor(kernel_size / 2))
self.reflection_pad = nn.ReflectionPad2d(reflect_padding)
self.conv2d = nn.Conv2d(in_dim, out_dim, kernel_size, stride,
dilation=dilation)
else:
self.conv2d = nn.Conv2d(in_dim, out_dim, kernel_size, stride,
dilation=dilation, padding=dilation)
def forward(self, x):
if self.dilation == 1:
out = self.reflection_pad(x)
out = self.conv2d(out)
else:
out = self.conv2d(x)
return out
class DuRB_pNew(nn.Module):
def __init__(self, in_dim=32, out_dim=32, res_dim=32, k1_size=3,
k2_size=1, dilation=1, norm_type='batch_norm', with_relu=True):
super(DuRB_pNew, self).__init__()
self.conv1 = ConvLayer(in_dim, in_dim, 3, 1)
self.conv2 = ConvLayer(in_dim, in_dim, 3, 1)
self.up_conv = ConvLayer(in_dim, res_dim, kernel_size=k1_size,
stride=1, dilation=dilation)
self.down_conv = ConvLayer(res_dim, out_dim, kernel_size=k2_size,
stride=1)
self.with_relu = with_relu
self.relu = nn.ReLU()
def forward(self, input_0, input_1):
primals_2 = self.conv1.conv2d.weight
primals_3 = self.conv1.conv2d.bias
primals_4 = self.conv2.conv2d.weight
primals_5 = self.conv2.conv2d.bias
primals_6 = self.up_conv.conv2d.weight
primals_7 = self.up_conv.conv2d.bias
primals_9 = self.down_conv.conv2d.weight
primals_10 = self.down_conv.conv2d.bias
primals_1 = input_0
primals_8 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0], output[1]
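# --- Illustrative usage sketch (editor addition, not part of the source repo) ---
# Drives the Triton-backed DuRB_pNew with the tensor shapes asserted in call() above
# (two CUDA tensors of shape (4, 32, 4, 4)); the batch size and device handling here
# are assumptions for illustration only.
def _durb_p_demo():
    if not torch.cuda.is_available():
        return
    m = DuRB_pNew().cuda()
    x = torch.rand(4, 32, 4, 4, device='cuda')
    res = torch.rand(4, 32, 4, 4, device='cuda')
    out, new_res = m(x, res)
    print(out.shape, new_res.shape)  # both torch.Size([4, 32, 4, 4])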
|
vis-opt-group/GTANet
|
DuRB_p
| false
| 4,500
|
[
"MIT"
] | 0
|
269ff4418ee5f0267987e1fa4c69bda13e5cb00d
|
https://github.com/vis-opt-group/GTANet/tree/269ff4418ee5f0267987e1fa4c69bda13e5cb00d
|
Accuracy
|
from torch.nn import Module
import torch
from torch import Tensor
class Accuracy(Module):
"""
Class for calculating the accuracy for a given prediction and the labels
for comparison.
    Expects the inputs to be in the range 0 to 1 and applies a crossing threshold at 0.5;
    the labels are thresholded in the same way.
"""
def forward(self, pred: 'Tensor', lab: 'Tensor') ->Tensor:
"""
        :param pred: the model's prediction to compare with
:param lab: the labels for the data to compare to
:return: the calculated accuracy
"""
return Accuracy.calculate(pred, lab)
@staticmethod
def calculate(pred: 'Tensor', lab: 'Tensor'):
"""
        :param pred: the model's prediction to compare with
:param lab: the labels for the data to compare to
:return: the calculated accuracy
"""
pred = pred >= 0.5
lab = lab >= 0.5
correct = (pred == lab).sum()
total = lab.numel()
acc = correct.float() / total * 100.0
return acc
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
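# --- Illustrative sketch (editor addition): thresholded accuracy in percent ---
# Both predictions and labels are thresholded at 0.5 before comparison, as described
# in the class docstring; the example values below are assumptions chosen so the
# arithmetic is easy to follow.
def _accuracy_demo():
    pred = torch.tensor([0.9, 0.2, 0.7, 0.4])
    lab = torch.tensor([1.0, 0.0, 0.0, 0.0])
    print(Accuracy()(pred, lab))  # tensor(75.) -- 3 of 4 thresholded entries agree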
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn import Module
from torch import Tensor
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__to_copy_div_eq_ge_mul_sum_0(in_ptr0, in_ptr1,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 0.5
tmp2 = tmp0 >= tmp1
tmp4 = tmp3 >= tmp1
tmp5 = tmp2 == tmp4
tmp6 = tmp5.to(tl.int64)
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = tmp9.to(tl.float32)
tmp11 = 0.00390625
tmp12 = tmp10 * tmp11
tmp13 = 100.0
tmp14 = tmp12 * tmp13
tl.store(out_ptr1 + tl.full([1], 0, tl.int32), tmp14, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused__to_copy_div_eq_ge_mul_sum_0[grid(1)](arg0_1,
arg1_1, buf1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class AccuracyNew(Module):
"""
Class for calculating the accuracy for a given prediction and the labels
for comparison.
    Expects the inputs to be in the range 0 to 1 and applies a crossing threshold at 0.5;
    the labels are thresholded in the same way.
"""
@staticmethod
def calculate(pred: 'Tensor', lab: 'Tensor'):
"""
        :param pred: the model's prediction to compare with
:param lab: the labels for the data to compare to
:return: the calculated accuracy
"""
pred = pred >= 0.5
lab = lab >= 0.5
correct = (pred == lab).sum()
total = lab.numel()
acc = correct.float() / total * 100.0
return acc
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
vixadd/sparseml
|
Accuracy
| false
| 4,501
|
[
"Apache-2.0"
] | 0
|
e2dcb66bad713542158dfe54cba113a0cc02ed39
|
https://github.com/vixadd/sparseml/tree/e2dcb66bad713542158dfe54cba113a0cc02ed39
|
PureUpsampling
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class PureUpsampling(nn.Module):
def __init__(self, scale=2, mode='bilinear'):
super(PureUpsampling, self).__init__()
assert isinstance(scale, int)
self.scale = scale
self.mode = mode
def forward(self, x):
h, w = x.size(2) * self.scale, x.size(3) * self.scale
if self.mode == 'nearest':
xout = F.interpolate(input=x, size=(h, w), mode=self.mode)
else:
xout = F.interpolate(input=x, size=(h, w), mode=self.mode,
align_corners=True)
return xout
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
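# --- Illustrative sketch (editor addition): size-based interpolation ---
# The target (h, w) is computed from the integer scale and passed to F.interpolate,
# with align_corners=True for every mode except 'nearest'; the input shape below is
# an assumption for illustration.
def _upsampling_demo():
    up = PureUpsampling(scale=2, mode='bilinear')
    x = torch.rand(1, 3, 16, 16)
    print(up(x).shape)  # torch.Size([1, 3, 32, 32])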
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(
in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 8 % 8
x0 = xindex % 8
x2 = xindex // 64
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.42857142857142855
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 3, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tmp11 = x0
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp12 * tmp2
tmp14 = triton_helpers.maximum(tmp13, tmp4)
tmp15 = tmp14.to(tl.int32)
tmp16 = tl.load(in_ptr0 + (tmp15 + 4 * tmp10 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp17 = tmp15 + tmp7
tmp18 = triton_helpers.minimum(tmp17, tmp9)
tmp19 = tl.load(in_ptr0 + (tmp18 + 4 * tmp10 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp20 = tmp19 - tmp16
tmp21 = tmp15.to(tl.float32)
tmp22 = tmp14 - tmp21
tmp23 = triton_helpers.maximum(tmp22, tmp4)
tmp24 = 1.0
tmp25 = triton_helpers.minimum(tmp23, tmp24)
tmp26 = tmp20 * tmp25
tmp27 = tmp16 + tmp26
tmp28 = tl.load(in_ptr0 + (tmp15 + 4 * tmp6 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (tmp18 + 4 * tmp6 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp30 = tmp29 - tmp28
tmp31 = tmp30 * tmp25
tmp32 = tmp28 + tmp31
tmp33 = tmp27 - tmp32
tmp34 = tmp6.to(tl.float32)
tmp35 = tmp5 - tmp34
tmp36 = triton_helpers.maximum(tmp35, tmp4)
tmp37 = triton_helpers.minimum(tmp36, tmp24)
tmp38 = tmp33 * tmp37
tmp39 = tmp32 + tmp38
tl.store(in_out_ptr0 + x4, tmp39, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid
(1024)](buf1, arg0_1, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf1,
class PureUpsamplingNew(nn.Module):
def __init__(self, scale=2, mode='bilinear'):
super(PureUpsamplingNew, self).__init__()
assert isinstance(scale, int)
self.scale = scale
self.mode = mode
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
vlbthambawita/polyp-inpainting
|
PureUpsampling
| false
| 4,502
|
[
"MIT"
] | 0
|
f1d754f8ffb3f6d991206b2a661933ff32de0d7a
|
https://github.com/vlbthambawita/polyp-inpainting/tree/f1d754f8ffb3f6d991206b2a661933ff32de0d7a
|
RandomCrop
|
import torch
from torch import nn
def choose_rand_patches(x, patch_sz, dim):
assert dim == 2 or dim == 3
batch_sz = x.shape[0]
patches = x.unfold(dim, patch_sz, 1)
n_patches = patches.shape[2]
idx = torch.randint(0, n_patches, (batch_sz,))
if dim == 2:
patches = patches[torch.arange(batch_sz), :, idx, :]
elif dim == 3:
patches = patches[torch.arange(batch_sz), :, :, idx]
return patches
class RandomCrop(nn.Module):
def __init__(self, crop_sz):
super(RandomCrop, self).__init__()
self.crop_sz = crop_sz
def forward(self, x):
img_sz = x.shape[-1]
assert img_sz >= self.crop_sz, f'img_sz {img_sz} is too small for crop_sz {self.crop_sz}'
x = choose_rand_patches(x, self.crop_sz, 2)
x = choose_rand_patches(x, self.crop_sz, 2)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'crop_sz': 4}]
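# --- Illustrative sketch (editor addition): unfold-based random cropping ---
# choose_rand_patches unfolds the chosen dim into sliding windows of size patch_sz
# (stride 1) and keeps one randomly selected window per batch element; the cropped
# dim ends up as the last axis, which is why forward can call it with dim=2 twice.
# The image size below is an assumption.
def _random_crop_demo():
    x = torch.rand(4, 3, 8, 8)
    print(choose_rand_patches(x, 4, 2).shape)  # torch.Size([4, 3, 8, 4])
    print(RandomCrop(4)(x).shape)              # torch.Size([4, 3, 4, 4])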
|
import torch
from torch import device
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_index_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 1, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 1) | ~xmask,
'index out of bounds: 0 <= tmp4 < 1')
tmp7 = tmp6 + tmp1
tmp8 = tmp6 < 0
tmp9 = tl.where(tmp8, tmp7, tmp6)
tl.device_assert((0 <= tmp9) & (tmp9 < 1) | ~xmask,
'index out of bounds: 0 <= tmp9 < 1')
tmp11 = tl.load(in_ptr2 + (tmp4 + x2 + 4 * tmp9), xmask)
tl.store(out_ptr0 + x2, tmp11, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten.randint.low(0, 1, [4], device=device(type=
'cuda', index=0), pin_memory=False)
buf1 = buf0
del buf0
buf2 = torch.ops.aten.randint.low(0, 1, [4], device=device(type=
'cuda', index=0), pin_memory=False)
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_index_0[grid(256)](buf3, buf1, arg0_1, buf4, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del buf1
del buf3
return buf4,
def choose_rand_patches(x, patch_sz, dim):
assert dim == 2 or dim == 3
batch_sz = x.shape[0]
patches = x.unfold(dim, patch_sz, 1)
n_patches = patches.shape[2]
idx = torch.randint(0, n_patches, (batch_sz,))
if dim == 2:
patches = patches[torch.arange(batch_sz), :, idx, :]
elif dim == 3:
patches = patches[torch.arange(batch_sz), :, :, idx]
return patches
class RandomCropNew(nn.Module):
def __init__(self, crop_sz):
super(RandomCropNew, self).__init__()
self.crop_sz = crop_sz
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
vitskvara/shape-guided-anomaly-detection
|
RandomCrop
| false
| 4,503
|
[
"MIT"
] | 0
|
6685b2e0b97968a6d0f478d2920486da107b277f
|
https://github.com/vitskvara/shape-guided-anomaly-detection/tree/6685b2e0b97968a6d0f478d2920486da107b277f
|
TVLoss
|
import torch
import torch.nn as nn
class TVLoss(nn.Module):
def __init__(self):
super(TVLoss, self).__init__()
def forward(self, x):
h_x, w_x = x.size()[2:]
h_tv = torch.abs(x[:, :, 1:, :] - x[:, :, :h_x - 1, :])
w_tv = torch.abs(x[:, :, :, 1:] - x[:, :, :, :w_x - 1])
loss = torch.sum(h_tv) + torch.sum(w_tv)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
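# --- Illustrative sketch (editor addition): what the total-variation loss measures ---
# The loss sums absolute differences between vertically and horizontally adjacent
# pixels, so a constant image scores 0; the example tensors are assumptions.
def _tv_loss_demo():
    flat = torch.ones(1, 1, 4, 4)
    ramp = torch.arange(16.0).view(1, 1, 4, 4)
    print(TVLoss()(flat))  # tensor(0.)
    print(TVLoss()(ramp))  # tensor(60.) = 12 vertical diffs of 4 + 12 horizontal diffs of 1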
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_add_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r0 = rindex % 12
r1 = rindex // 12
r2 = rindex % 3
r3 = rindex // 3
tmp0 = tl.load(in_ptr0 + (4 + r0 + 16 * r1), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (r0 + 16 * r1), rmask, other=0.0)
tmp8 = tl.load(in_ptr0 + (1 + r2 + 4 * r3), rmask, other=0.0)
tmp9 = tl.load(in_ptr0 + (r2 + 4 * r3), rmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp10 = tmp8 - tmp9
tmp11 = tl_math.abs(tmp10)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.where(rmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = tmp7 + tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_add_sub_sum_0[grid(1)](buf2, arg0_1, 1, 192,
XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf2,
class TVLossNew(nn.Module):
def __init__(self):
super(TVLossNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
vlbthambawita/polyp-inpainting
|
TVLoss
| false
| 4,504
|
[
"MIT"
] | 0
|
f1d754f8ffb3f6d991206b2a661933ff32de0d7a
|
https://github.com/vlbthambawita/polyp-inpainting/tree/f1d754f8ffb3f6d991206b2a661933ff32de0d7a
|
conv_head_pooling
|
import torch
import torch.nn as nn
import torch.utils.data
class conv_head_pooling(nn.Module):
def __init__(self, in_feature, out_feature, stride, conv_type,
padding_mode='zeros', dilation=1):
super(conv_head_pooling, self).__init__()
if conv_type == 'depthwise':
_groups = in_feature
else:
_groups = 1
None
self.conv = nn.Conv2d(in_feature, out_feature, kernel_size=3,
padding=dilation, dilation=dilation, stride=stride,
padding_mode=padding_mode, groups=_groups)
self.fc = nn.Linear(in_feature, out_feature)
def forward(self, x, cls_token):
x = self.conv(x)
cls_token = self.fc(cls_token)
return x, cls_token
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_feature': 4, 'out_feature': 4, 'stride': 1,
'conv_type': 4}]
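# --- Illustrative sketch (editor addition): pooling patch tokens and the class token ---
# The conv branch transforms the spatial feature map (depthwise when conv_type is
# 'depthwise') while the linear branch maps the class token to the new width; all
# sizes below are assumptions, not values from the source repo.
def _pooling_demo():
    pool = conv_head_pooling(64, 128, stride=2, conv_type='depthwise')
    x = torch.rand(2, 64, 14, 14)
    cls_token = torch.rand(2, 1, 64)
    y, cls_out = pool(x, cls_token)
    print(y.shape, cls_out.shape)  # torch.Size([2, 128, 7, 7]) torch.Size([2, 1, 128])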
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(256)](buf1, primals_2, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf2)
del primals_4
del primals_5
return buf1, reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_1, primals_3, reinterpret_tensor(primals_6, (64, 4), (4,
1), 0)
class conv_head_poolingNew(nn.Module):
def __init__(self, in_feature, out_feature, stride, conv_type,
padding_mode='zeros', dilation=1):
super(conv_head_poolingNew, self).__init__()
if conv_type == 'depthwise':
_groups = in_feature
else:
_groups = 1
None
self.conv = nn.Conv2d(in_feature, out_feature, kernel_size=3,
padding=dilation, dilation=dilation, stride=stride,
padding_mode=padding_mode, groups=_groups)
self.fc = nn.Linear(in_feature, out_feature)
def forward(self, input_0, input_1):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_4 = self.fc.weight
primals_5 = self.fc.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
|
tsubauaaa/d2go
|
conv_head_pooling
| false
| 4,505
|
[
"Apache-2.0"
] | 0
|
9f746159ebf78ce79f644c405ca8695bc29d1075
|
https://github.com/tsubauaaa/d2go/tree/9f746159ebf78ce79f644c405ca8695bc29d1075
|
ShortcutLayer
|
import torch
import torch.nn as nn
class ShortcutLayer(nn.Module):
def __init__(self, idx):
super(ShortcutLayer, self).__init__()
self.idx = idx
def forward(self, x, outputs):
return x + outputs[self.idx]
def get_inputs():
return [torch.rand([5, 4, 4, 4]), torch.rand([5, 4, 4, 4])]
def get_init_inputs():
return [[], {'idx': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + (256 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (5, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (5, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((5, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(320)](arg1_1, arg0_1, buf0, 320, XBLOCK
=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class ShortcutLayerNew(nn.Module):
def __init__(self, idx):
super(ShortcutLayerNew, self).__init__()
self.idx = idx
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
vrindaprabhu/solofy
|
ShortcutLayer
| false
| 4,506
|
[
"MIT"
] | 0
|
d5e26ff20d293c200485c70be6dcd6481afba396
|
https://github.com/vrindaprabhu/solofy/tree/d5e26ff20d293c200485c70be6dcd6481afba396
|
CustomGroupNorm
|
import torch
class CustomGroupNorm(torch.nn.Module):
"""
    Custom Group Norm which uses n_groups=2 as the default number of groups
"""
def __init__(self, n_features, n_groups=2):
"""
Parameters
----------
n_features : int
number of input features
n_groups : int
number of normalization groups
"""
super().__init__()
self.norm = torch.nn.GroupNorm(n_groups, n_features)
def forward(self, x):
"""
        Forward a batch through the network
Parameters
----------
x : :class:`torch.Tensor`
batch to forward
Returns
-------
:class:`torch.Tensor`
normalized results
"""
return self.norm(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_features': 4}]
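# --- Illustrative sketch (editor addition): two-group normalization ---
# With the default n_groups=2, the channels are split into two groups that are
# normalized together per sample; the input shape mirrors get_inputs() above.
def _group_norm_demo():
    gn = CustomGroupNorm(n_features=4)
    y = gn(torch.rand(4, 4, 4, 4))
    print(y.view(4, 2, -1).mean(-1))  # per-sample, per-group means are ~0 after normalization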
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_native_group_norm_0(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 8
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 32 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 32, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 32.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tl.store(out_ptr2 + x0, tmp21, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
tl.store(out_ptr1 + x0, tmp16, xmask)
@triton.jit
def triton_poi_fused_native_group_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex // 16
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x4 // 2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x4 // 2, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 32.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 1, 1), (2, 1, 8, 8), torch.float32)
buf1 = empty_strided_cuda((4, 2, 1, 1), (2, 1, 8, 8), torch.float32)
buf4 = empty_strided_cuda((4, 2, 1, 1), (2, 1, 8, 8), torch.float32)
get_raw_stream(0)
triton_per_fused_native_group_norm_0[grid(8)](primals_3, buf0, buf1,
buf4, 8, 32, XBLOCK=8, num_warps=2, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_group_norm_1[grid(256)](primals_3, buf0,
buf1, primals_1, primals_2, buf3, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf1
del primals_1
del primals_2
return buf3, primals_3, reinterpret_tensor(buf0, (4, 2, 1), (2, 1, 1), 0
), reinterpret_tensor(buf4, (4, 2, 1), (2, 1, 1), 0)
class CustomGroupNormNew(torch.nn.Module):
"""
    Custom Group Norm which uses n_groups=2 as the default number of groups
"""
def __init__(self, n_features, n_groups=2):
"""
Parameters
----------
n_features : int
number of input features
n_groups : int
number of normalization groups
"""
super().__init__()
self.norm = torch.nn.GroupNorm(n_groups, n_features)
def forward(self, input_0):
primals_1 = self.norm.weight
primals_2 = self.norm.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
vuamitom/shapenet
|
CustomGroupNorm
| false
| 4,507
|
[
"BSD-2-Clause"
] | 0
|
9eb3dadc91801756cb3460707c37146c8176643e
|
https://github.com/vuamitom/shapenet/tree/9eb3dadc91801756cb3460707c37146c8176643e
|
PairwiseNorm
|
import torch
from torch import nn
class PairwiseNorm(nn.Module):
def __init__(self, order=1, size_average=True):
super().__init__()
self.order = order
self.average = size_average
def forward(self, inp, target=None):
inp = inp.flatten(1)
assert len(inp) % 2 == 0
samples1, samples2 = inp[::2], inp[1::2]
norm = (samples1 - samples2).norm(p=self.order, dim=1).sum()
if self.average:
norm = norm / len(inp)
return norm
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
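# --- Illustrative sketch (editor addition): norm between interleaved sample pairs ---
# Even-indexed and odd-indexed rows are treated as pairs; each pair's p-norm distance
# is summed and divided by the batch size. The values below are assumptions.
def _pairwise_norm_demo():
    inp = torch.tensor([[1.0, 2.0], [1.0, 0.0], [3.0, 3.0], [0.0, 3.0]])
    # pairs: (row0, row1) -> |0| + |2| = 2, (row2, row3) -> |3| + |0| = 3; (2 + 3) / 4 = 1.25
    print(PairwiseNorm()(inp))  # tensor(1.2500)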
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_linalg_vector_norm_sub_0(in_ptr0, out_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 2
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 128 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (64 + r1 + 128 * x0), xmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tl.store(out_ptr0 + x0, tmp7, xmask)
@triton.jit
def triton_per_fused_div_linalg_vector_norm_sum_1(in_out_ptr0, in_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 2
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.sum(tmp1, 1)[:, None]
tmp4 = 0.25
tmp5 = tmp3 * tmp4
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((2,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_linalg_vector_norm_sub_0[grid(2)](arg0_1, buf0, 2,
64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused_div_linalg_vector_norm_sum_1[grid(1)](buf2, buf0,
1, 2, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
return buf2,
class PairwiseNormNew(nn.Module):
def __init__(self, order=1, size_average=True):
super().__init__()
self.order = order
self.average = size_average
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
vzinche/inferno
|
PairwiseNorm
| false
| 4,508
|
[
"Apache-2.0"
] | 0
|
91b22dfcd1b6a9ec415f0bbb6ae66caea42f4034
|
https://github.com/vzinche/inferno/tree/91b22dfcd1b6a9ec415f0bbb6ae66caea42f4034
|
Norm
|
import torch
from torch import nn
class Norm(nn.Module):
def __init__(self, order=1, size_average=True):
super().__init__()
self.order = order
self.average = size_average
def forward(self, inp, target=None):
if target is not None:
inp = inp - target
inp = inp.flatten()
norm = torch.norm(inp, p=self.order)
if self.average:
norm = norm / len(inp)
return norm
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
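# --- Illustrative sketch (editor addition): averaged p-norm, optionally of a residual ---
# When a target is given the norm is taken over (inp - target); with size_average the
# result is divided by the number of elements. The values below are assumptions.
def _norm_demo():
    loss = Norm(order=1)
    inp = torch.tensor([1.0, -2.0, 3.0, -4.0])
    print(loss(inp))                             # tensor(2.5000) = (1 + 2 + 3 + 4) / 4
    print(loss(inp, torch.full_like(inp, 2.0)))  # tensor(3.) = (1 + 4 + 1 + 6) / 4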
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_linalg_vector_norm_0(in_out_ptr0, in_ptr0, xnumel,
rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl_math.abs(tmp0)
tmp2 = tl.broadcast_to(tmp1, [RBLOCK])
tmp4 = triton_helpers.promote_to_tensor(tl.sum(tmp2, 0))
tmp5 = 0.00390625
tmp6 = tmp4 * tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp6, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_linalg_vector_norm_0[grid(1)](buf1, arg0_1, 1,
256, num_warps=2, num_stages=1)
del arg0_1
return buf1,
class NormNew(nn.Module):
def __init__(self, order=1, size_average=True):
super().__init__()
self.order = order
self.average = size_average
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
vzinche/inferno
|
Norm
| false
| 4,509
|
[
"Apache-2.0"
] | 0
|
91b22dfcd1b6a9ec415f0bbb6ae66caea42f4034
|
https://github.com/vzinche/inferno/tree/91b22dfcd1b6a9ec415f0bbb6ae66caea42f4034
|
ContrastiveLoss
|
import torch
from torch import nn
class ContrastiveLoss(nn.Module):
def __init__(self, margin=1.0, reduction='mean'):
super().__init__()
self.m = margin
assert reduction in ['mean', 'sum', 'none']
self.reduction = reduction
def forward(self, dist, class_):
dist = dist.transpose(0, -1)
loss = 0.5 * class_ * dist ** 2 + (1 - class_) * 0.5 * torch.clamp(
self.m - dist, min=0) ** 2
if self.reduction == 'mean':
loss = loss.mean()
elif self.reduction == 'sum':
loss = loss.sum()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
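# --- Illustrative sketch (editor addition): margin-based contrastive loss ---
# For class_ == 1 the loss is 0.5 * dist**2 (pull similar pairs together); for
# class_ == 0 it is 0.5 * clamp(margin - dist, min=0)**2 (push dissimilar pairs apart
# until they exceed the margin). The values below are assumptions.
def _contrastive_demo():
    loss = ContrastiveLoss(margin=1.0, reduction='none')
    dist = torch.tensor([0.2, 0.2, 2.0])
    class_ = torch.tensor([1.0, 0.0, 0.0])
    print(loss(dist, class_))  # tensor([0.0200, 0.3200, 0.0000])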
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_clamp_mean_mul_pow_rsub_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 4
r1 = rindex // 4 % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp3 = tl.load(in_ptr1 + (r2 + 4 * r1 + 64 * r0), None, eviction_policy
='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 * tmp4
tmp6 = 1.0
tmp7 = tmp6 - tmp0
tmp8 = tmp7 * tmp1
tmp9 = tmp6 - tmp3
tmp10 = 0.0
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp12 = tmp11 * tmp11
tmp13 = tmp8 * tmp12
tmp14 = tmp5 + tmp13
tmp15 = tl.broadcast_to(tmp14, [RBLOCK])
tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0))
tmp18 = 256.0
tmp19 = tmp17 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp19, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_clamp_mean_mul_pow_rsub_0[grid(1)](buf1,
arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class ContrastiveLossNew(nn.Module):
def __init__(self, margin=1.0, reduction='mean'):
super().__init__()
self.m = margin
assert reduction in ['mean', 'sum', 'none']
self.reduction = reduction
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
vzinche/inferno
|
ContrastiveLoss
| false
| 4,510
|
[
"Apache-2.0"
] | 0
|
91b22dfcd1b6a9ec415f0bbb6ae66caea42f4034
|
https://github.com/vzinche/inferno/tree/91b22dfcd1b6a9ec415f0bbb6ae66caea42f4034
|
WordPredictor
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.onnx.operators
class WordPredictor(nn.Module):
def __init__(self, encoder_output_dim, hidden_dim, output_dim):
super().__init__()
self.encoder_output_dim = encoder_output_dim
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.init_layer = nn.Linear(encoder_output_dim, encoder_output_dim)
self.attn_layer = nn.Linear(2 * encoder_output_dim, 1)
self.hidden_layer = nn.Linear(2 * encoder_output_dim, hidden_dim)
self.output_layer = nn.Linear(hidden_dim, output_dim)
def forward(self, encoder_output):
encoder_hiddens, *_ = encoder_output
assert encoder_hiddens.dim() == 3
init_state = self._get_init_state(encoder_hiddens)
attn_scores = self._attention(encoder_hiddens, init_state)
attned_state = (encoder_hiddens * attn_scores).sum(0)
pred_input = torch.cat([init_state, attned_state], 1)
pred_hidden = F.relu(self.hidden_layer(pred_input))
pred_logit = self.output_layer(pred_hidden)
return pred_logit
def _get_init_state(self, encoder_hiddens):
x = torch.mean(encoder_hiddens, 0)
x = F.relu(self.init_layer(x))
return x
def _attention(self, encoder_hiddens, init_state):
init_state = init_state.unsqueeze(0).expand_as(encoder_hiddens)
attn_input = torch.cat([init_state, encoder_hiddens], 2)
attn_scores = F.relu(self.attn_layer(attn_input))
attn_scores = F.softmax(attn_scores, 0)
return attn_scores
def get_normalized_probs(self, net_output, log_probs):
"""Get normalized probabilities (or log probs) from a net's output."""
logits = net_output
if log_probs:
return F.log_softmax(logits, dim=1)
else:
return F.softmax(logits, dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'encoder_output_dim': 4, 'hidden_dim': 4, 'output_dim': 4}]
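# --- Illustrative sketch (editor addition): attention-pooled word prediction ---
# encoder_hiddens is expected as (src_len, batch, encoder_output_dim); an attention
# over source positions pools the hidden states before the two-layer prediction head.
# The sizes below are assumptions for illustration.
def _word_predictor_demo():
    wp = WordPredictor(encoder_output_dim=8, hidden_dim=16, output_dim=100)
    encoder_hiddens = torch.rand(5, 2, 8)  # (src_len, batch, dim)
    logits = wp((encoder_hiddens,))
    print(logits.shape)  # torch.Size([2, 100])
    print(wp.get_normalized_probs(logits, log_probs=False).sum(dim=1))  # ~ones per row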
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
import torch.onnx.operators
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8 % 4
x3 = xindex // 8
x4 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp15 = tl.load(in_ptr2 + (4 * x3 + (-4 + x0)), tmp12 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tl.where(tmp4, tmp11, tmp15)
tl.store(out_ptr0 + x4, tmp16, xmask)
@triton.jit
def triton_poi_fused__softmax_relu_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp6 = triton_helpers.maximum(tmp1, tmp5)
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = triton_helpers.maximum(tmp1, tmp8)
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = triton_helpers.maximum(tmp1, tmp11)
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tl_math.exp(tmp14)
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_cat_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp15 = tl.load(in_ptr2 + (4 * x1 + (-4 + x0)), tmp12 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr3 + x1, tmp12 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp17 = tmp15 * tmp16
tmp18 = tl.load(in_ptr2 + (16 + 4 * x1 + (-4 + x0)), tmp12 & xmask,
eviction_policy='evict_last', other=0.0)
tmp19 = tl.load(in_ptr3 + (4 + x1), tmp12 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp20 = tmp18 * tmp19
tmp21 = tmp17 + tmp20
tmp22 = tl.load(in_ptr2 + (32 + 4 * x1 + (-4 + x0)), tmp12 & xmask,
eviction_policy='evict_last', other=0.0)
tmp23 = tl.load(in_ptr3 + (8 + x1), tmp12 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp24 = tmp22 * tmp23
tmp25 = tmp21 + tmp24
tmp26 = tl.load(in_ptr2 + (48 + 4 * x1 + (-4 + x0)), tmp12 & xmask,
eviction_policy='evict_last', other=0.0)
tmp27 = tl.load(in_ptr3 + (12 + x1), tmp12 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp28 = tmp26 * tmp27
tmp29 = tmp25 + tmp28
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp12, tmp29, tmp30)
tmp32 = tl.where(tmp4, tmp11, tmp31)
tl.store(out_ptr0 + x2, tmp32, xmask)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_6(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (1, 8), (8, 1))
assert_size_stride(primals_5, (1,), (1,))
assert_size_stride(primals_6, (4, 8), (8, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_2, (4, 4), (1, 4
), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
triton_poi_fused_cat_1[grid(128)](buf1, primals_3, primals_1, buf2,
128, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (16, 8), (
8, 1), 0), reinterpret_tensor(primals_4, (8, 1), (1, 8), 0),
alpha=1, beta=1, out=buf4)
del primals_5
buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused__softmax_relu_2[grid(16)](buf4, buf5, 16, XBLOCK=
16, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused__softmax_3[grid(16)](buf5, buf6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
triton_poi_fused_cat_4[grid(32)](buf1, primals_3, primals_1, buf6,
buf7, 32, XBLOCK=32, num_warps=1, num_stages=1)
buf8 = reinterpret_tensor(buf6, (4, 4), (4, 1), 0)
del buf6
extern_kernels.mm(buf7, reinterpret_tensor(primals_6, (8, 4), (1, 8
), 0), out=buf8)
buf9 = buf8
del buf8
triton_poi_fused_relu_5[grid(16)](buf9, primals_7, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_7
buf10 = reinterpret_tensor(buf5, (4, 4), (4, 1), 0)
del buf5
extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf10)
del primals_9
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_6[grid(16)](buf1,
primals_3, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf1
del primals_3
return buf10, reinterpret_tensor(primals_1, (4, 4, 4), (16, 4, 1), 0
), buf0, reinterpret_tensor(buf2, (16, 8), (8, 1), 0
), buf4, buf7, buf9, primals_8, primals_6, primals_4, buf11
class WordPredictorNew(nn.Module):
def __init__(self, encoder_output_dim, hidden_dim, output_dim):
super().__init__()
self.encoder_output_dim = encoder_output_dim
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.init_layer = nn.Linear(encoder_output_dim, encoder_output_dim)
self.attn_layer = nn.Linear(2 * encoder_output_dim, 1)
self.hidden_layer = nn.Linear(2 * encoder_output_dim, hidden_dim)
self.output_layer = nn.Linear(hidden_dim, output_dim)
def _get_init_state(self, encoder_hiddens):
x = torch.mean(encoder_hiddens, 0)
x = F.relu(self.init_layer(x))
return x
def _attention(self, encoder_hiddens, init_state):
init_state = init_state.unsqueeze(0).expand_as(encoder_hiddens)
attn_input = torch.cat([init_state, encoder_hiddens], 2)
attn_scores = F.relu(self.attn_layer(attn_input))
attn_scores = F.softmax(attn_scores, 0)
return attn_scores
def get_normalized_probs(self, net_output, log_probs):
"""Get normalized probabilities (or log probs) from a net's output."""
logits = net_output
if log_probs:
return F.log_softmax(logits, dim=1)
else:
return F.softmax(logits, dim=1)
def forward(self, input_0):
primals_2 = self.init_layer.weight
primals_3 = self.init_layer.bias
primals_4 = self.attn_layer.weight
primals_5 = self.attn_layer.bias
primals_6 = self.hidden_layer.weight
primals_7 = self.hidden_layer.bias
primals_8 = self.output_layer.weight
primals_9 = self.output_layer.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
vincentLiangBerkeley/translate
|
WordPredictor
| false
| 4,511
|
[
"BSD-3-Clause"
] | 0
|
734ae1ad9dfb778935e4825b5ce2687e2df559ea
|
https://github.com/vincentLiangBerkeley/translate/tree/734ae1ad9dfb778935e4825b5ce2687e2df559ea
|
PairwiseCrossCorrelation
|
import torch
from torch import nn
class PairwiseCrossCorrelation(nn.Module):
def __init__(self, lambd=1):
super().__init__()
self.lambd = lambd
def off_diagonal(self, x):
n, m = x.shape
assert n == m
return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()
def forward(self, inp, target=None):
inp = inp.flatten(1)
assert len(inp) % 2 == 0
samples1, samples2 = inp[::2], inp[1::2]
c = samples1.T @ samples2
on_diag = torch.diagonal(c).add_(-1)
on_diag = torch.pow(on_diag, 2).sum()
off_diag = self.off_diagonal(c)
off_diag = torch.pow(off_diag, 2).sum()
loss = on_diag + self.lambd * off_diag
loss = loss / len(c) ** 2
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
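# --- Illustrative sketch (editor addition): redundancy-reduction loss on paired views ---
# Interleaved rows form two views; their feature cross-correlation matrix is pushed
# towards the identity (diagonal -> 1, off-diagonal -> 0, weighted by lambd). The
# batch and feature sizes below are assumptions.
def _cross_correlation_demo():
    loss_fn = PairwiseCrossCorrelation(lambd=1)
    feats = torch.rand(8, 16)  # 4 pairs of views, 16 features each
    print(loss_fn(feats))      # scalar penalty for deviation from the identity matrix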
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_pow_sum_0(in_ptr0, out_ptr1, out_ptr2, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 65 * r0, None, eviction_policy='evict_last')
tmp1 = -1.0
tmp2 = tmp0 + tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tl.store(out_ptr1 + tl.broadcast_to(65 * r0, [XBLOCK, RBLOCK]), tmp2, None)
tl.store(out_ptr2 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None)
@triton.jit
def triton_red_fused_add_div_mul_pow_sum_1(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 4032
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
_tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r0 = rindex
tmp0 = tl.load(in_ptr0 + (1 + 65 * (r0 // 64) + r0 % 64), rmask,
eviction_policy='evict_first', other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = _tmp3 + tmp2
_tmp3 = tl.where(rmask, tmp4, _tmp3)
tmp3 = tl.sum(_tmp3, 1)[:, None]
tmp5 = tl.load(in_out_ptr0 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, 1])
tmp7 = 1.0
tmp8 = tmp3 * tmp7
tmp9 = tmp6 + tmp8
tmp10 = 0.000244140625
tmp11 = tmp9 * tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp11, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(arg0_1, (64, 2), (1, 128), 0),
reinterpret_tensor(arg0_1, (2, 64), (128, 1), 64), out=buf0)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_add_pow_sum_0[grid(1)](buf0, buf0, buf3, 1, 64,
XBLOCK=1, num_warps=2, num_stages=1)
buf5 = buf3
del buf3
triton_red_fused_add_div_mul_pow_sum_1[grid(1)](buf5, buf0, 1, 4032,
XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1)
del buf0
return buf5,
class PairwiseCrossCorrelationNew(nn.Module):
def __init__(self, lambd=1):
super().__init__()
self.lambd = lambd
def off_diagonal(self, x):
n, m = x.shape
assert n == m
return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
vzinche/inferno
|
PairwiseCrossCorrelation
| false
| 4,512
|
[
"Apache-2.0"
] | 0
|
91b22dfcd1b6a9ec415f0bbb6ae66caea42f4034
|
https://github.com/vzinche/inferno/tree/91b22dfcd1b6a9ec415f0bbb6ae66caea42f4034
|
Linear
|
import torch
from torch import Tensor
from warnings import warn
from torch.nn import functional as F
from torch.nn import Linear as normal_linear
import torch.utils.data
from torchvision import transforms as transforms
class Linear(normal_linear):
def __init__(self, *args, **kwargs):
super(Linear, self).__init__(*args, **kwargs)
if self.bias is not None:
warn(
                'A Linear layer has bias, which may not be suitable for weight centralization.'
)
def forward(self, input: 'Tensor') ->Tensor:
weight = self.weight
weight_mean = weight.mean(dim=1, keepdim=True)
weight = weight - weight_mean
return F.linear(input, weight, self.bias)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
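# --- Illustrative sketch (editor addition): weight centralization at forward time ---
# Each output row of the weight matrix has its mean subtracted before the matmul, so
# the effective weight rows sum to zero; bias=False avoids the warning above. The
# layer size below is an assumption.
def _centralized_linear_demo():
    layer = Linear(4, 4, bias=False)
    centered = layer.weight - layer.weight.mean(dim=1, keepdim=True)
    print(centered.sum(dim=1))            # ~zeros: every centered row sums to 0
    print(layer(torch.rand(2, 4)).shape)  # torch.Size([2, 4])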
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from warnings import warn
from torch.nn import Linear as normal_linear
import torch.utils.data
from torchvision import transforms as transforms
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_sub_0[grid(16)](primals_1, buf0, 16, XBLOCK=
16, num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf1)
del buf0
del primals_2
return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)
class LinearNew(normal_linear):
def __init__(self, *args, **kwargs):
super(LinearNew, self).__init__(*args, **kwargs)
if self.bias is not None:
warn(
                'A Linear layer has bias, which may not be suitable for weight centralization.'
)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
wang93/pytorch-cifar10
|
Linear
| false
| 4,513
|
[
"Apache-2.0"
] | 0
|
07a54dd575aad9b011114352d08fdd9f61e360a1
|
https://github.com/wang93/pytorch-cifar10/tree/07a54dd575aad9b011114352d08fdd9f61e360a1
|
Mnist_CNN
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.quantization
import torch.onnx
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
class Mnist_CNN(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1)
self.conv2 = nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=1)
self.conv3 = nn.Conv2d(16, 10, kernel_size=3, stride=2, padding=1)
def forward(self, xb):
xb = xb.view(-1, 1, 28, 28)
xb = F.relu(self.conv1(xb))
xb = F.relu(self.conv2(xb))
xb = F.relu(self.conv3(xb))
xb = F.avg_pool2d(xb, 4)
return xb.view(-1, xb.size(1))
def get_inputs():
return [torch.rand([4, 1, 28, 28])]
def get_init_inputs():
return [[], {}]
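# --- Illustrative sketch (editor addition): all-convolutional MNIST classifier ---
# Three stride-2 convolutions shrink 28x28 to 4x4, and average pooling over that 4x4
# map leaves one logit per class. The batch size below is an assumption.
def _mnist_cnn_demo():
    model = Mnist_CNN()
    xb = torch.rand(8, 1, 28, 28)
    print(model(xb).shape)  # torch.Size([8, 10])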
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.quantization
import torch.onnx
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 12544
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 196 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 3136
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 49 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 640
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 10
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_avg_pool2d_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 40
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp18 = tmp17 + tmp16
tmp20 = tmp19 + tmp18
tmp22 = tmp21 + tmp20
tmp24 = tmp23 + tmp22
tmp26 = tmp25 + tmp24
tmp28 = tmp27 + tmp26
tmp30 = tmp29 + tmp28
tmp31 = 0.0625
tmp32 = tmp30 * tmp31
tl.store(out_ptr0 + x0, tmp32, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 1, 28, 28), (784, 784, 28, 1))
assert_size_stride(primals_2, (16, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_3, (16,), (1,))
assert_size_stride(primals_4, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (10, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(2,
2), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 14, 14), (3136, 196, 14, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(12544)](buf1, primals_3,
12544, XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 16, 7, 7), (784, 49, 7, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(3136)](buf3, primals_5,
3136, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 10, 4, 4), (160, 16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(640)](buf5, primals_7, 640,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((4, 10, 1, 1), (10, 1, 1, 1), torch.float32)
triton_poi_fused_avg_pool2d_3[grid(40)](buf5, buf6, 40, XBLOCK=64,
num_warps=1, num_stages=1)
return reinterpret_tensor(buf6, (4, 10), (10, 1), 0
), primals_2, primals_4, primals_6, primals_1, buf1, buf3, buf5
class Mnist_CNNNew(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1)
self.conv2 = nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=1)
self.conv3 = nn.Conv2d(16, 10, kernel_size=3, stride=2, padding=1)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
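# Hedged usage sketch (added for illustration; not part of the source repository):
# a parity check between the eager Mnist_CNN defined earlier in this entry and the
# Triton-compiled Mnist_CNNNew above. The weight copy via load_state_dict and the
# tolerance are assumptions; the generated kernels only run on a CUDA device.
import torch
if torch.cuda.is_available():
    eager = Mnist_CNN().cuda().eval()
    compiled = Mnist_CNNNew().cuda().eval()
    compiled.load_state_dict(eager.state_dict())  # give both modules identical weights
    x = torch.rand(4, 1, 28, 28, device='cuda')   # matches the shape hard-coded in call()
    with torch.no_grad():
        ref = eager(x)
        out = compiled(x)
    assert ref.shape == out.shape == (4, 10)
    assert torch.allclose(ref, out, atol=1e-4)    # both paths agree up to float noise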
|
voyageth/PyTorch-tutorials-kr
|
Mnist_CNN
| false
| 4,514
|
[
"BSD-3-Clause"
] | 0
|
05d2dd5931abfca6ce1e0b297f4ceb7f4eae6239
|
https://github.com/voyageth/PyTorch-tutorials-kr/tree/05d2dd5931abfca6ce1e0b297f4ceb7f4eae6239
|
FrequencyLoss
|
import torch
import torch.nn as nn
class FrequencyLoss(nn.Module):
"""Charbonnier Loss (L1)"""
def __init__(self, eps=0.001):
super(FrequencyLoss, self).__init__()
self.criterion = torch.nn.L1Loss()
def forward(self, x, y):
x_fft = torch.fft.rfft2(x, dim=(2, 3))
y_fft = torch.fft.rfft2(y, dim=(2, 3))
loss = self.criterion(x_fft, y_fft)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, rmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 192.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten._fft_r2c.default(arg0_1, [2, 3], 0, True)
del arg0_1
buf1 = buf0
del buf0
buf2 = torch.ops.aten._fft_r2c.default(arg1_1, [2, 3], 0, True)
del arg1_1
buf3 = buf2
del buf2
buf4 = torch.ops.aten.sub.Tensor(buf1, buf3)
del buf1
del buf3
buf5 = buf4
del buf4
buf6 = torch.ops.aten.abs.default(buf5)
del buf5
buf7 = buf6
del buf6
buf8 = empty_strided_cuda((), (), torch.float32)
buf9 = buf8
del buf8
get_raw_stream(0)
triton_per_fused_mean_0[grid(1)](buf9, buf7, 1, 192, XBLOCK=1,
num_warps=2, num_stages=1)
del buf7
return buf9,
class FrequencyLossNew(nn.Module):
"""Charbonnier Loss (L1)"""
def __init__(self, eps=0.001):
super(FrequencyLossNew, self).__init__()
self.criterion = torch.nn.L1Loss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
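# Hedged usage sketch (added for illustration; not from the source repository). The traced
# graph above reduces the loss to mean(|rfft2(x) - rfft2(y)|), so the eager FrequencyLoss
# defined earlier in this entry can be sanity-checked on CPU against that closed form,
# assuming a PyTorch build whose L1Loss accepts complex inputs (as the graph implies).
import torch
x = torch.rand(4, 4, 4, 4)
y = torch.rand(4, 4, 4, 4)
loss = FrequencyLoss()(x, y)
manual = (torch.fft.rfft2(x, dim=(2, 3)) - torch.fft.rfft2(y, dim=(2, 3))).abs().mean()
assert torch.allclose(loss, manual, atol=1e-6)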
|
vztu/DebandingNet
|
FrequencyLoss
| false
| 4,515
|
[
"MIT"
] | 0
|
4af8e83ffbfc70dc220dd6fea2827fb75796f10c
|
https://github.com/vztu/DebandingNet/tree/4af8e83ffbfc70dc220dd6fea2827fb75796f10c
|
FocalLossSigmoid
|
import torch
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
class FocalLossSigmoid(nn.Module):
"""
    Sigmoid version of focal loss.
"""
def __init__(self, alpha=0.25, gamma=2, size_average=False):
super(FocalLossSigmoid, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.size_average = size_average
def forward(self, inputs, targets):
inputs.size(0)
inputs.size(1)
P = torch.sigmoid(inputs)
alpha_mask = self.alpha * targets
loss_pos = -1.0 * torch.pow(1 - P, self.gamma) * torch.log(P
) * targets * alpha_mask
loss_neg = -1.0 * torch.pow(1 - P, self.gamma) * torch.log(1 - P) * (
1 - targets) * (1 - alpha_mask)
batch_loss = loss_neg + loss_pos
if self.size_average:
loss = batch_loss.mean()
else:
loss = batch_loss.sum()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_log_mul_pow_rsub_sigmoid_sum_0(in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp9 = tl.load(in_ptr1 + r0, None)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp4 = tmp3 * tmp3
tmp5 = -1.0
tmp6 = tmp4 * tmp5
tmp7 = tl_math.log(tmp3)
tmp8 = tmp6 * tmp7
tmp10 = tmp2 - tmp9
tmp11 = tmp8 * tmp10
tmp12 = 0.25
tmp13 = tmp9 * tmp12
tmp14 = tmp2 - tmp13
tmp15 = tmp11 * tmp14
tmp16 = tl_math.log(tmp1)
tmp17 = tmp6 * tmp16
tmp18 = tmp17 * tmp9
tmp19 = tmp18 * tmp13
tmp20 = tmp15 + tmp19
tmp21 = tl.broadcast_to(tmp20, [RBLOCK])
tmp23 = triton_helpers.promote_to_tensor(tl.sum(tmp21, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp23, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_add_log_mul_pow_rsub_sigmoid_sum_0[grid(1)](arg0_1,
arg1_1, buf0, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class FocalLossSigmoidNew(nn.Module):
"""
    Sigmoid version of focal loss.
"""
def __init__(self, alpha=0.25, gamma=2, size_average=False):
super(FocalLossSigmoidNew, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.size_average = size_average
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
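# Illustration-only sketch (not from the source repository): exercising the eager
# FocalLossSigmoid defined earlier in this entry on CPU and, when CUDA is available,
# checking it against the fused kernel above. Note that the fused kernel bakes in the
# default alpha=0.25 and gamma=2 as constants, so only the defaults are compared here.
import torch
logits = torch.randn(4, 4, 4, 4)
targets = torch.randint(0, 2, (4, 4, 4, 4)).float()
eager_loss = FocalLossSigmoid()(logits, targets)
if torch.cuda.is_available():
    fused_loss = FocalLossSigmoidNew()(logits.cuda(), targets.cuda())
    assert torch.allclose(eager_loss, fused_loss.cpu(), atol=1e-4)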
|
wangbingok1118/SSD_Pytorch
|
FocalLossSigmoid
| false
| 4,516
|
[
"MIT"
] | 0
|
8d3f924671cec367c3c420eba2f002cc5b5181bb
|
https://github.com/wangbingok1118/SSD_Pytorch/tree/8d3f924671cec367c3c420eba2f002cc5b5181bb
|
MedianPool2d
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
from torch.nn.modules.utils import _quadruple
class MedianPool2d(nn.Module):
""" Median pool (usable as median filter when stride=1) module.
Args:
kernel_size: size of pooling kernel, int or 2-tuple
stride: pool stride, int or 2-tuple
padding: pool padding, int or 4-tuple (l, r, t, b) as in pytorch F.pad
same: override padding and enforce same padding, boolean
"""
def __init__(self, kernel_size=3, stride=1, padding=0, same=True):
super(MedianPool2d, self).__init__()
self.k = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _quadruple(padding)
self.same = same
def _padding(self, x):
if self.same:
ih, iw = x.size()[2:]
if ih % self.stride[0] == 0:
ph = max(self.k[0] - self.stride[0], 0)
else:
ph = max(self.k[0] - ih % self.stride[0], 0)
if iw % self.stride[1] == 0:
pw = max(self.k[1] - self.stride[1], 0)
else:
pw = max(self.k[1] - iw % self.stride[1], 0)
pl = pw // 2
pr = pw - pl
pt = ph // 2
pb = ph - pt
padding = pl, pr, pt, pb
else:
padding = self.padding
return padding
def forward(self, x):
x = F.pad(x, self._padding(x), mode='reflect')
x = x.unfold(2, self.k[0], self.stride[0]).unfold(3, self.k[1],
self.stride[1])
x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0]
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from torch.nn.modules.utils import _pair
from torch.nn.modules.utils import _quadruple
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 2304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = xindex // 3 % 3
x2 = xindex // 9 % 4
x3 = xindex // 36 % 4
x4 = xindex // 144
x5 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0 + x2)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1 + x3)) + 16 *
x4), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x5, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 3, 3), (576, 144, 36, 9, 3,
1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(2304)](arg0_1, buf0, 2304, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
buf1 = torch.ops.aten.median.dim(reinterpret_tensor(buf0, (4, 4, 4,
4, 9), (576, 144, 36, 9, 1), 0), -1)
del buf0
buf2 = buf1[0]
del buf1
return buf2,
class MedianPool2dNew(nn.Module):
""" Median pool (usable as median filter when stride=1) module.
Args:
kernel_size: size of pooling kernel, int or 2-tuple
stride: pool stride, int or 2-tuple
padding: pool padding, int or 4-tuple (l, r, t, b) as in pytorch F.pad
same: override padding and enforce same padding, boolean
"""
def __init__(self, kernel_size=3, stride=1, padding=0, same=True):
super(MedianPool2dNew, self).__init__()
self.k = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _quadruple(padding)
self.same = same
def _padding(self, x):
if self.same:
ih, iw = x.size()[2:]
if ih % self.stride[0] == 0:
ph = max(self.k[0] - self.stride[0], 0)
else:
ph = max(self.k[0] - ih % self.stride[0], 0)
if iw % self.stride[1] == 0:
pw = max(self.k[1] - self.stride[1], 0)
else:
pw = max(self.k[1] - iw % self.stride[1], 0)
pl = pw // 2
pr = pw - pl
pt = ph // 2
pb = ph - pt
padding = pl, pr, pt, pb
else:
padding = self.padding
return padding
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
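# Hedged example (added for illustration; not present upstream). With kernel_size=3,
# stride=1 and same=True, the eager MedianPool2d defined earlier in this entry acts as a
# 3x3 median filter, so an isolated impulse in an otherwise-zero image should be removed
# while the spatial size is preserved.
import torch
img = torch.zeros(1, 1, 5, 5)
img[0, 0, 2, 2] = 1.0  # a single "salt" pixel
filtered = MedianPool2d(kernel_size=3, stride=1, same=True)(img)
assert filtered.shape == img.shape
assert filtered.abs().max().item() == 0.0  # the impulse is filtered out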
|
vztu/DebandingNet
|
MedianPool2d
| false
| 4,517
|
[
"MIT"
] | 0
|
4af8e83ffbfc70dc220dd6fea2827fb75796f10c
|
https://github.com/vztu/DebandingNet/tree/4af8e83ffbfc70dc220dd6fea2827fb75796f10c
|
ParseL1loss
|
import torch
from torch import nn
import torch.nn.functional as F
class ParseL1loss(nn.Module):
def __init__(self):
super(ParseL1loss, self).__init__()
def forward(self, output, target, mask):
mask = (mask == 1).float()
loss = F.l1_loss(output * mask, target * mask, size_average=False)
loss = loss / (mask.sum() + 0.0001)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__to_copy_abs_add_div_eq_mul_sub_sum_0(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp6 = tl.load(in_ptr2 + r0, None)
tmp2 = 1.0
tmp3 = tmp1 == tmp2
tmp4 = tmp3.to(tl.float32)
tmp5 = tmp0 * tmp4
tmp7 = tmp6 * tmp4
tmp8 = tmp5 - tmp7
tmp9 = tl_math.abs(tmp8)
tmp10 = tl.broadcast_to(tmp9, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = tl.broadcast_to(tmp4, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 0.0001
tmp17 = tmp15 + tmp16
tmp18 = tmp12 / tmp17
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused__to_copy_abs_add_div_eq_mul_sub_sum_0[grid(1)](buf2,
arg1_1, arg0_1, arg2_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf2,
class ParseL1lossNew(nn.Module):
def __init__(self):
super(ParseL1lossNew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
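# Illustration-only sketch (not from the source repository): the loss above only counts
# positions where mask == 1 and normalises by the number of such positions (+1e-4), so
# with an all-ones mask it should equal the summed L1 error divided by (numel + 1e-4).
# Uses the eager ParseL1loss defined earlier in this entry; runs on CPU.
import torch
out = torch.rand(4, 4, 4, 4)
tgt = torch.rand(4, 4, 4, 4)
mask = torch.ones(4, 4, 4, 4)
loss = ParseL1loss()(out, tgt, mask)
expected = (out - tgt).abs().sum() / (mask.sum() + 0.0001)
assert torch.allclose(loss, expected, atol=1e-6)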
|
weberhen/NonCuboidRoom
|
ParseL1loss
| false
| 4,518
|
[
"MIT"
] | 0
|
871a77941697f1457cdae541b8ffcdce4f9134e3
|
https://github.com/weberhen/NonCuboidRoom/tree/871a77941697f1457cdae541b8ffcdce4f9134e3
|
WassersteinLoss
|
import torch
from torch import nn
class WassersteinLoss(nn.Module):
"""For WGAN."""
def forward(self, real, fake):
return real.mean() - fake.mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp4 = tl.load(in_ptr1 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0))
tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = 256.0
tmp9 = tmp3 / tmp8
tmp10 = tmp7 / tmp8
tmp11 = tmp9 - tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp11, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_sub_0[grid(1)](buf2, arg0_1, arg1_1, 1, 256,
num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf2,
class WassersteinLossNew(nn.Module):
"""For WGAN."""
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
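# Illustrative sketch (added here; not part of the original library): the critic objective
# above is just mean(real) - mean(fake), so the fused kernel can be checked against that
# closed form. Guarded on CUDA availability because the Triton kernel only runs on GPU.
import torch
if torch.cuda.is_available():
    real = torch.rand(4, 4, 4, 4, device='cuda')
    fake = torch.rand(4, 4, 4, 4, device='cuda')
    fused = WassersteinLossNew()(real, fake)
    assert torch.allclose(fused, real.mean() - fake.mean(), atol=1e-6)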
|
wegroupwolves/fastai
|
WassersteinLoss
| false
| 4,519
|
[
"Apache-2.0"
] | 0
|
df40df403e05e132411f0f7abc7ec33c86e58bb9
|
https://github.com/wegroupwolves/fastai/tree/df40df403e05e132411f0f7abc7ec33c86e58bb9
|
SpatialAttn
|
import torch
import torch.nn as nn
class SpatialAttn(nn.Module):
"""Spatial Attention Layer"""
def __init__(self):
super(SpatialAttn, self).__init__()
def forward(self, x):
x = x.mean(1, keepdim=True)
h = x.size(2)
w = x.size(3)
x = x.view(x.size(0), -1)
z = x
for b in range(x.size(0)):
z[b] /= torch.sum(z[b])
z = z.view(x.size(0), 1, h, w)
return z
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_div_sum_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK:
tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr0 + (16 + r0), None)
tmp3 = tl.load(in_ptr0 + (32 + r0), None)
tmp5 = tl.load(in_ptr0 + (48 + r0), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tmp12 = tmp8 / tmp11
tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp12, None)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16
x0 = xindex % 16
x2 = xindex
tmp3 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp7 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp9 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = 4.0
tmp12 = tmp10 / tmp11
tmp13 = tl.where(tmp2, tmp3, tmp12)
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_per_fused_div_sum_2(in_ptr0, out_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp3 = tl.load(in_ptr0 + (r0 + 4 * (r0 % 4 // 4)), None)
tmp4 = tl.load(in_ptr0 + (16 + r0 + 4 * (r0 % 4 // 4)), None)
tmp12 = tl.load(in_ptr0 + (r0 + 8 * (r0 % 4 // 4)), None)
tmp13 = tl.load(in_ptr0 + (16 + r0 + 8 * (r0 % 4 // 4)), None)
tmp18 = tl.load(in_ptr0 + (32 + r0 + 8 * (r0 % 4 // 4)), None)
tmp0 = tl.full([1, 1], 1, tl.int32)
tmp1 = tl.full([1, 1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tmp9 = tl.full([1, 1], 2, tl.int32)
tmp10 = tmp9 == tmp0
tmp11 = tmp0 == tmp0
tmp14 = tl.where(tmp2, tmp12, tmp13)
tmp15 = tmp14 / tmp8
tmp16 = tl.where(tmp11, tmp15, tmp14)
tmp17 = tmp9 == tmp1
tmp19 = tl.where(tmp17, tmp12, tmp18)
tmp20 = tl.where(tmp10, tmp15, tmp19)
tmp21 = tl.where(tmp10, tmp16, tmp20)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK])
tmp24 = tl.sum(tmp22, 1)[:, None]
tmp25 = tmp21 / tmp24
tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp25, None)
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp8, None)
@triton.jit
def triton_poi_fused_div_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16
x0 = xindex % 16
x2 = xindex
tmp3 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (x0 + 8 * (x0 % 4 // 4)), xmask,
eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (16 + x0 + 8 * (x0 % 4 // 4)), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + 0)
tmp13 = tl.broadcast_to(tmp12, [XBLOCK])
tmp17 = tl.load(in_ptr1 + (x0 + 8 * (x0 % 4 // 4) + 16 * x1), xmask)
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = tl.full([1], 1, tl.int32)
tmp5 = tmp0 == tmp4
tmp6 = tmp4 == tmp4
tmp7 = tl.full([1], 0, tl.int32)
tmp8 = tmp4 == tmp7
tmp11 = tl.where(tmp8, tmp9, tmp10)
tmp14 = tmp11 / tmp13
tmp15 = tl.where(tmp6, tmp14, tmp11)
tmp16 = tmp0 == tmp7
tmp18 = tl.where(tmp16, tmp9, tmp17)
tmp19 = tl.where(tmp5, tmp14, tmp18)
tmp20 = tl.where(tmp5, tmp15, tmp19)
tmp21 = tl.where(tmp2, tmp3, tmp20)
tl.store(out_ptr0 + x2, tmp21, xmask)
@triton.jit
def triton_per_fused_sum_4(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp3 = tl.load(in_ptr0 + (32 + r0 + 4 * (r0 % 4 // 4)), None)
tmp4 = tl.load(in_ptr0 + (48 + r0 + 4 * (r0 % 4 // 4)), None)
tmp0 = tl.full([1, 1], 3, tl.int32)
tmp1 = tl.full([1, 1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp8, None)
@triton.jit
def triton_poi_fused_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16
x0 = xindex % 16
x2 = xindex
tmp6 = tl.load(in_ptr0 + (32 + x0 + 8 * (x0 % 4 // 4)), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (48 + x0 + 8 * (x0 % 4 // 4)), xmask,
eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + 0)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
tmp14 = tl.load(in_ptr0 + (x0 + 8 * (x0 % 4 // 4) + 16 * x1), xmask)
tmp0 = x1
tmp1 = tl.full([1], 3, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tmp1 == tmp1
tmp4 = tl.full([1], 2, tl.int32)
tmp5 = tmp1 == tmp4
tmp8 = tl.where(tmp5, tmp6, tmp7)
tmp11 = tmp8 / tmp10
tmp12 = tl.where(tmp3, tmp11, tmp8)
tmp13 = tmp0 == tmp4
tmp15 = tl.where(tmp13, tmp6, tmp14)
tmp16 = tl.where(tmp2, tmp11, tmp15)
tmp17 = tl.where(tmp2, tmp12, tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((16,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_div_sum_0[grid(1)](arg0_1, buf1, 1, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf2 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
triton_poi_fused_div_1[grid(64)](buf1, arg0_1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf5 = buf1
del buf1
triton_per_fused_div_sum_2[grid(1)](buf2, buf3, buf5, 1, 16, XBLOCK
=1, num_warps=2, num_stages=1)
buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
triton_poi_fused_div_3[grid(64)](buf5, buf2, buf3, buf6, 64, XBLOCK
=64, num_warps=1, num_stages=1)
del buf5
buf7 = buf3
del buf3
triton_per_fused_sum_4[grid(1)](buf6, buf7, 1, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf8 = buf2
del buf2
triton_poi_fused_5[grid(64)](buf6, buf7, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf6
del buf7
return reinterpret_tensor(buf8, (4, 1, 4, 4), (16, 16, 4, 1), 0),
class SpatialAttnNew(nn.Module):
"""Spatial Attention Layer"""
def __init__(self):
super(SpatialAttnNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
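# Hedged sanity check (added for illustration; not from the upstream repository): the layer
# averages over channels and then normalises each sample's map to sum to 1, so the output
# should be a per-sample spatial distribution of shape (N, 1, H, W). Uses the eager
# SpatialAttn defined earlier in this entry, which also runs on CPU.
import torch
x = torch.rand(4, 4, 4, 4)
attn = SpatialAttn()(x)
assert attn.shape == (4, 1, 4, 4)
assert torch.allclose(attn.view(4, -1).sum(dim=1), torch.ones(4), atol=1e-5)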
|
wangminjie920705/Part-reid
|
SpatialAttn
| false
| 4,520
|
[
"MIT"
] | 0
|
34a1e968a2eab692ba810332f309e82b441793f6
|
https://github.com/wangminjie920705/Part-reid/tree/34a1e968a2eab692ba810332f309e82b441793f6
|
FocalLoss
|
import torch
from torch import nn
import torch.nn.functional as F
class FocalLoss(nn.Module):
def __init__(self, focusing_param=2, balance_param=0.25):
super(FocalLoss, self).__init__()
self.focusing_param = focusing_param
self.balance_param = balance_param
def forward(self, output, target):
cross_entropy = F.cross_entropy(output, target)
torch.log(cross_entropy)
logpt = -F.cross_entropy(output, target)
pt = torch.exp(logpt)
focal_loss = -(1 - pt) ** self.focusing_param * logpt
balanced_focal_loss = self.balance_param * focal_loss
return balanced_focal_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_div_exp_mul_neg_pow_rsub_sum_1(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr1 + r3, None)
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp15 = tmp13 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = -tmp18
tmp20 = 0.015625
tmp21 = tmp19 * tmp20
tmp22 = -tmp21
tmp23 = tl_math.exp(tmp22)
tmp24 = 1.0
tmp25 = tmp24 - tmp23
tmp26 = tmp25 * tmp25
tmp27 = -tmp26
tmp28 = tmp27 * tmp22
tmp29 = 0.25
tmp30 = tmp28 * tmp29
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp30, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused__log_softmax_div_exp_mul_neg_pow_rsub_sum_1[grid(1)](
buf2, buf0, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del buf0
return buf2,
class FocalLossNew(nn.Module):
def __init__(self, focusing_param=2, balance_param=0.25):
super(FocalLossNew, self).__init__()
self.focusing_param = focusing_param
self.balance_param = balance_param
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
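# Usage sketch for illustration only (not from the source repository): the eager FocalLoss
# defined earlier in this entry accepts ordinary logits and class-index targets and returns
# a balanced scalar; the fused FocalLossNew above instead expects the two 4x4x4x4 float
# tensors from get_inputs() on a CUDA device.
import torch
logits = torch.randn(8, 5)          # 8 samples, 5 classes
labels = torch.randint(0, 5, (8,))
loss = FocalLoss(focusing_param=2, balance_param=0.25)(logits, labels)
assert loss.dim() == 0
assert torch.isfinite(loss)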
|
wanghao15536870732/plants_disease_classify
|
FocalLoss
| false
| 4,521
|
[
"Apache-2.0"
] | 0
|
6d0d1d39f0ec15fc2bd523142c5c403a1577da84
|
https://github.com/wanghao15536870732/plants_disease_classify/tree/6d0d1d39f0ec15fc2bd523142c5c403a1577da84
|
SigmoidRange
|
import torch
from torch import nn
def sigmoid_range(x, low, high):
"""Sigmoid function with range `(low, high)`"""
return torch.sigmoid(x) * (high - low) + low
class SigmoidRange(nn.Module):
"""Sigmoid module with range `(low,x_max)`"""
def __init__(self, low, high):
super().__init__()
self.low, self.high = low, high
def forward(self, x):
return sigmoid_range(x, self.low, self.high)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'low': 4, 'high': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 0.0
tmp3 = tmp1 * tmp2
tmp4 = 4.0
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
def sigmoid_range(x, low, high):
"""Sigmoid function with range `(low, high)`"""
return torch.sigmoid(x) * (high - low) + low
class SigmoidRangeNew(nn.Module):
"""Sigmoid module with range `(low,x_max)`"""
def __init__(self, low, high):
super().__init__()
self.low, self.high = low, high
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
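# Minimal example (illustration only, not from the source repository): sigmoid_range squashes
# its input into the open interval (low, high). Note that with low == high == 4, as in
# get_init_inputs(), the compiled kernel above degenerates to a constant output of 4.
import torch
x = torch.randn(4, 4, 4, 4) * 10
y = SigmoidRange(low=1.0, high=5.0)(x)
assert (y > 1.0).all()
assert (y < 5.0).all()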
|
wegroupwolves/fastai
|
SigmoidRange
| false
| 4,522
|
[
"Apache-2.0"
] | 0
|
df40df403e05e132411f0f7abc7ec33c86e58bb9
|
https://github.com/wegroupwolves/fastai/tree/df40df403e05e132411f0f7abc7ec33c86e58bb9
|
TemporalRelation
|
import torch
import torch.nn as nn
class TemporalRelation(nn.Module):
def __init__(self, feat_dim, time_window=1):
super(TemporalRelation, self).__init__()
self.time_window = time_window
self.feat_dim = feat_dim
self.WT = nn.Linear(self.feat_dim, self.feat_dim, bias=False)
def forward(self, feats):
relation_feature = []
att_feats = self.WT(feats)
for t in range(0, att_feats.size()[0], 1):
if t < self.time_window:
prev = att_feats[0, :, :]
else:
prev = att_feats[t - 1, :, :]
if t == att_feats.size()[0] - 1:
next = att_feats[t, :, :]
else:
next = att_feats[t + 1, :, :]
relation_feature.append(prev + next)
relation_feature = torch.stack(relation_feature, dim=0)
return relation_feature / 2 + feats
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'feat_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_div_stack_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16
x0 = xindex % 16
x2 = xindex
tmp41 = tl.load(in_ptr1 + x2, xmask)
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1), tmp4 & xmask, other=0.0)
tmp6 = tl.load(in_ptr0 + (64 + x0 + 16 * x1), tmp4 & xmask, other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 8, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tl.load(in_ptr0 + (x0 + 16 * (-4 + x1)), tmp13 & xmask, other=0.0)
tmp15 = tl.load(in_ptr0 + (128 + x0 + 16 * (-4 + x1)), tmp13 & xmask,
other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp13, tmp16, tmp17)
tmp19 = tmp0 >= tmp11
tmp20 = tl.full([1], 12, tl.int64)
tmp21 = tmp0 < tmp20
tmp22 = tmp19 & tmp21
tmp23 = tl.load(in_ptr0 + (64 + x0 + 16 * (-8 + x1)), tmp22 & xmask,
other=0.0)
tmp24 = tl.load(in_ptr0 + (192 + x0 + 16 * (-8 + x1)), tmp22 & xmask,
other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp22, tmp25, tmp26)
tmp28 = tmp0 >= tmp20
tl.full([1], 16, tl.int64)
tmp31 = tl.load(in_ptr0 + (128 + x0 + 16 * (-12 + x1)), tmp28 & xmask,
other=0.0)
tmp32 = tl.load(in_ptr0 + (192 + x0 + 16 * (-12 + x1)), tmp28 & xmask,
other=0.0)
tmp33 = tmp31 + tmp32
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp28, tmp33, tmp34)
tmp36 = tl.where(tmp22, tmp27, tmp35)
tmp37 = tl.where(tmp13, tmp18, tmp36)
tmp38 = tl.where(tmp4, tmp9, tmp37)
tmp39 = 0.5
tmp40 = tmp38 * tmp39
tmp42 = tmp40 + tmp41
tl.store(in_out_ptr0 + x2, tmp42, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
get_raw_stream(0)
triton_poi_fused_add_div_stack_0[grid(256)](buf2, buf0, primals_2,
256, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
return buf2, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0)
class TemporalRelationNew(nn.Module):
def __init__(self, feat_dim, time_window=1):
super(TemporalRelationNew, self).__init__()
self.time_window = time_window
self.feat_dim = feat_dim
self.WT = nn.Linear(self.feat_dim, self.feat_dim, bias=False)
def forward(self, input_0):
primals_1 = self.WT.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
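# Illustration-only sketch (not from the source repository): the relation module projects the
# features, mixes each time step with its previous and next neighbours, and adds half of the
# result back to the input, so the output keeps the input shape. Uses the eager
# TemporalRelation defined earlier in this entry; runs on CPU.
import torch
rel = TemporalRelation(feat_dim=4, time_window=1)
feats = torch.rand(4, 4, 4, 4)
out = rel(feats)
assert out.shape == feats.shape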
|
weiyi1991/UA_Concurrent
|
TemporalRelation
| false
| 4,523
|
[
"MIT"
] | 0
|
11238c778c60095abf326800d6e6a13a643bf071
|
https://github.com/weiyi1991/UA_Concurrent/tree/11238c778c60095abf326800d6e6a13a643bf071
|
LinearNormalGamma
|
import torch
from torch import nn
class LinearNormalGamma(nn.Module):
def __init__(self, in_chanels, out_channels):
super().__init__()
self.linear = nn.Linear(in_chanels, out_channels * 4)
def evidence(self, x):
return torch.log(torch.exp(x) + 1)
def forward(self, x):
pred = self.linear(x).view(x.shape[0], -1, 4)
mu, logv, logalpha, logbeta = [w.squeeze(-1) for w in torch.split(
pred, 1, dim=-1)]
return mu, self.evidence(logv), self.evidence(logalpha
) + 1, self.evidence(logbeta)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_chanels': 4, 'out_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_exp_log_0(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tl_math.exp(tmp0)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = tl_math.log(tmp3)
tl.store(out_ptr0 + x0, tmp1, xmask)
tl.store(out_ptr1 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_exp_log_1(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tl_math.exp(tmp0)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = tl_math.log(tmp3)
tmp5 = tmp4 + tmp2
tl.store(out_ptr0 + x0, tmp1, xmask)
tl.store(out_ptr1 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_add_exp_log_2(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tl_math.exp(tmp0)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = tl_math.log(tmp3)
tl.store(out_ptr0 + x0, tmp1, xmask)
tl.store(out_ptr1 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (16, 4), (4, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4),
0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
buf2 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_exp_log_0[grid(256)](buf0, buf1, buf2, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
buf4 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
triton_poi_fused_add_exp_log_1[grid(256)](buf0, buf3, buf4, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
buf6 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
triton_poi_fused_add_exp_log_2[grid(256)](buf0, buf5, buf6, 256,
XBLOCK=256, num_warps=4, num_stages=1)
return reinterpret_tensor(buf0, (4, 64), (256, 4), 0
), buf2, buf4, buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, buf3, buf5
class LinearNormalGammaNew(nn.Module):
def __init__(self, in_chanels, out_channels):
super().__init__()
self.linear = nn.Linear(in_chanels, out_channels * 4)
def evidence(self, x):
return torch.log(torch.exp(x) + 1)
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0], output[1], output[2], output[3]
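# Hedged sketch (not from the original repository): the head above projects to 4*out_channels
# values and splits them into Normal-Inverse-Gamma parameters; v and beta pass through
# log(exp(x) + 1) (softplus) and alpha additionally gets +1, so v, beta > 0 and alpha > 1.
# The keyword spelling `in_chanels` follows the constructor signature above.
import torch
head = LinearNormalGamma(in_chanels=4, out_channels=4)
mu, v, alpha, beta = head(torch.rand(4, 4, 4, 4))
assert (v > 0).all()
assert (beta > 0).all()
assert (alpha > 1).all()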
|
wanzysky/evidential-deep-learning
|
LinearNormalGamma
| false
| 4,524
|
[
"Apache-2.0"
] | 0
|
71ebd59ab3a4b66c38d919e8aa9ad3711a416796
|
https://github.com/wanzysky/evidential-deep-learning/tree/71ebd59ab3a4b66c38d919e8aa9ad3711a416796
|
GMM_Module
|
import math
import torch
import torch.nn as nn
import torch.utils.data
class GMM_Module(nn.Module):
"""
GMM Module
"""
def __init__(self, out_channel_M, k):
super(GMM_Module, self).__init__()
self.conv1 = nn.Conv2d(int(out_channel_M), k * out_channel_M,
kernel_size=1)
torch.nn.init.xavier_normal_(self.conv1.weight.data, math.sqrt(2 *
1 * (k + 1) / (1 + 1)))
torch.nn.init.constant_(self.conv1.bias.data, 0.01)
self.lrelu_1 = nn.LeakyReLU()
self.conv2 = nn.Conv2d(k * out_channel_M, 2 * k * out_channel_M,
kernel_size=1)
torch.nn.init.xavier_normal_(self.conv2.weight.data, math.sqrt(2 *
1 * (k + 2 * k) / (k + k)))
torch.nn.init.constant_(self.conv2.bias.data, 0.01)
self.lrelu_2 = nn.LeakyReLU()
self.conv3 = nn.Conv2d(2 * k * out_channel_M, 3 * k * out_channel_M,
kernel_size=1)
torch.nn.init.xavier_normal_(self.conv3.weight.data, math.sqrt(2 *
1 * (2 * k + 3 * k) / (2 * k + 2 * k)))
torch.nn.init.constant_(self.conv3.bias.data, 0.01)
def forward(self, input):
x = self.lrelu_1(self.conv1(input))
x = self.lrelu_2(self.conv2(x))
return self.conv3(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'out_channel_M': 4, 'k': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 32
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 3072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 48
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (16, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (32, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (48, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_7, (48,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 4, 4), (256, 16, 4, 1))
buf1 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32
)
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(1024)](buf0,
primals_2, buf1, buf2, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del primals_2
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 32, 4, 4), (512, 16, 4, 1))
buf4 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
buf5 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.float32
)
triton_poi_fused_convolution_leaky_relu_1[grid(2048)](buf3,
primals_5, buf4, buf5, 2048, XBLOCK=128, num_warps=4, num_stages=1)
del buf3
del primals_5
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 48, 4, 4), (768, 16, 4, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_2[grid(3072)](buf7, primals_7, 3072,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
return (buf7, primals_1, primals_3, primals_4, primals_6, buf1, buf2,
buf4, buf5)
class GMM_ModuleNew(nn.Module):
"""
GMM Module
"""
def __init__(self, out_channel_M, k):
super(GMM_ModuleNew, self).__init__()
self.conv1 = nn.Conv2d(int(out_channel_M), k * out_channel_M,
kernel_size=1)
torch.nn.init.xavier_normal_(self.conv1.weight.data, math.sqrt(2 *
1 * (k + 1) / (1 + 1)))
torch.nn.init.constant_(self.conv1.bias.data, 0.01)
self.lrelu_1 = nn.LeakyReLU()
self.conv2 = nn.Conv2d(k * out_channel_M, 2 * k * out_channel_M,
kernel_size=1)
torch.nn.init.xavier_normal_(self.conv2.weight.data, math.sqrt(2 *
1 * (k + 2 * k) / (k + k)))
torch.nn.init.constant_(self.conv2.bias.data, 0.01)
self.lrelu_2 = nn.LeakyReLU()
self.conv3 = nn.Conv2d(2 * k * out_channel_M, 3 * k * out_channel_M,
kernel_size=1)
torch.nn.init.xavier_normal_(self.conv3.weight.data, math.sqrt(2 *
1 * (2 * k + 3 * k) / (2 * k + 2 * k)))
torch.nn.init.constant_(self.conv3.bias.data, 0.01)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
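# Illustration-only sketch (not from the source repository): the three 1x1 convolutions above
# expand M input channels to k*M -> 2*k*M -> 3*k*M with LeakyReLU in between, so for
# out_channel_M=4 and k=4 the output carries 48 channels at the same spatial resolution.
# Uses the eager GMM_Module defined earlier in this entry; runs on CPU.
import torch
gmm = GMM_Module(out_channel_M=4, k=4)
out = gmm(torch.rand(4, 4, 4, 4))
assert out.shape == (4, 48, 4, 4)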
|
wemozj/Image-Compression-based-GMM-and-Attention-Module
|
GMM_Module
| false
| 4,525
|
[
"Apache-2.0"
] | 0
|
93f804dbcea8ffc1621456f3d104d0342c75373b
|
https://github.com/wemozj/Image-Compression-based-GMM-and-Attention-Module/tree/93f804dbcea8ffc1621456f3d104d0342c75373b
|
MobileViTv2Attention
|
import torch
from torch import nn
from torch.nn import init
class MobileViTv2Attention(nn.Module):
"""
Scaled dot-product attention
"""
def __init__(self, d_model):
"""
:param d_model: Output dimensionality of the model
:param d_k: Dimensionality of queries and keys
:param d_v: Dimensionality of values
:param h: Number of heads
"""
super(MobileViTv2Attention, self).__init__()
self.fc_i = nn.Linear(d_model, 1)
self.fc_k = nn.Linear(d_model, d_model)
self.fc_v = nn.Linear(d_model, d_model)
self.fc_o = nn.Linear(d_model, d_model)
self.d_model = d_model
self.init_weights()
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.001)
if m.bias is not None:
init.constant_(m.bias, 0)
def forward(self, input):
"""
Computes
:param queries: Queries (b_s, nq, d_model)
:return:
"""
i = self.fc_i(input)
weight_i = torch.softmax(i, dim=1)
context_score = weight_i * self.fc_k(input)
context_vector = torch.sum(context_score, dim=1, keepdim=True)
v = self.fc_v(input) * context_vector
out = self.fc_o(v)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
from torch.nn import init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused__softmax_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex % 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + (x3 + 64 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (4 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr1 + (16 + x3 + 64 * x2), xmask)
tmp7 = tl.load(in_ptr0 + (8 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr1 + (32 + x3 + 64 * x2), xmask)
tmp11 = tl.load(in_ptr0 + (12 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr1 + (48 + x3 + 64 * x2), xmask)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x4, tmp14, xmask)
@triton.jit
def triton_poi_fused__softmax_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_1
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf3)
del primals_4
del primals_5
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf2, buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf5)
del primals_6
del primals_7
buf6 = reinterpret_tensor(buf2, (4, 1, 4, 4), (16, 64, 4, 1), 0)
del buf2
triton_poi_fused__softmax_mul_sum_2[grid(64)](buf4, buf3, buf6, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf4
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_mul_sum_3[grid(256)](buf5, buf6, buf7,
256, XBLOCK=256, num_warps=4, num_stages=1)
del buf6
buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf7, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf8)
del primals_9
return reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, buf3, buf5, reinterpret_tensor(buf7, (64, 4), (4, 1), 0
), primals_8
class MobileViTv2AttentionNew(nn.Module):
"""
Scaled dot-product attention
"""
def __init__(self, d_model):
"""
:param d_model: Output dimensionality of the model
:param d_k: Dimensionality of queries and keys
:param d_v: Dimensionality of values
:param h: Number of heads
"""
super(MobileViTv2AttentionNew, self).__init__()
self.fc_i = nn.Linear(d_model, 1)
self.fc_k = nn.Linear(d_model, d_model)
self.fc_v = nn.Linear(d_model, d_model)
self.fc_o = nn.Linear(d_model, d_model)
self.d_model = d_model
self.init_weights()
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.001)
if m.bias is not None:
init.constant_(m.bias, 0)
def forward(self, input_0):
primals_1 = self.fc_i.weight
primals_2 = self.fc_i.bias
primals_4 = self.fc_k.weight
primals_5 = self.fc_k.bias
primals_6 = self.fc_v.weight
primals_7 = self.fc_v.bias
primals_8 = self.fc_o.weight
primals_9 = self.fc_o.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
weihaoxie/External-Attention-pytorch
|
MobileViTv2Attention
| false
| 4,526
|
[
"MIT"
] | 0
|
9bec70f4ed8dd858c815e9bad240ab2f95a91a9f
|
https://github.com/weihaoxie/External-Attention-pytorch/tree/9bec70f4ed8dd858c815e9bad240ab2f95a91a9f
|
BitEstimator
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class Bitparm(nn.Module):
"""
save params
"""
def __init__(self, channel, final=False):
super(Bitparm, self).__init__()
self.final = final
self.h = nn.Parameter(torch.nn.init.normal_(torch.empty(channel).
view(1, -1, 1, 1), 0, 0.01))
self.b = nn.Parameter(torch.nn.init.normal_(torch.empty(channel).
view(1, -1, 1, 1), 0, 0.01))
if not final:
self.a = nn.Parameter(torch.nn.init.normal_(torch.empty(channel
).view(1, -1, 1, 1), 0, 0.01))
else:
self.a = None
def forward(self, x):
if self.final:
return torch.sigmoid(x * F.softplus(self.h) + self.b)
else:
x = x * F.softplus(self.h) + self.b
return x + torch.tanh(x) * torch.tanh(self.a)
class BitEstimator(nn.Module):
"""
Estimate bit
"""
def __init__(self, channel):
super(BitEstimator, self).__init__()
self.f1 = Bitparm(channel)
self.f2 = Bitparm(channel)
self.f3 = Bitparm(channel)
self.f4 = Bitparm(channel, True)
def forward(self, x):
x = self.f1(x)
x = self.f2(x)
x = self.f3(x)
return self.f4(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channel': 4}]
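# Editor-added usage sketch (not part of the original repo): exercises the stacked
# Bitparm layers above on CPU with the shapes from get_inputs()/get_init_inputs().
if __name__ == "__main__":
    est = BitEstimator(channel=4)
    x = torch.rand(4, 4, 4, 4)   # channel dim (dim=1) must match `channel`
    cdf = est(x)                 # final Bitparm applies a sigmoid, so values lie in (0, 1)
    print(cdf.shape, float(cdf.min()), float(cdf.max()))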
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_sigmoid_softplus_tanh_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8,
in_ptr9, in_ptr10, in_ptr11, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr6 + x1, xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr7 + x1, xmask, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr8 + x1, xmask, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr9 + x1, xmask, eviction_policy='evict_last')
tmp41 = tl.load(in_ptr10 + x1, xmask, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr11 + x1, xmask, eviction_policy='evict_last')
tmp2 = 20.0
tmp3 = tmp1 > tmp2
tmp4 = tl_math.exp(tmp1)
tmp5 = libdevice.log1p(tmp4)
tmp6 = tl.where(tmp3, tmp1, tmp5)
tmp7 = tmp0 * tmp6
tmp9 = tmp7 + tmp8
tmp10 = libdevice.tanh(tmp9)
tmp12 = libdevice.tanh(tmp11)
tmp13 = tmp10 * tmp12
tmp14 = tmp9 + tmp13
tmp16 = tmp15 > tmp2
tmp17 = tl_math.exp(tmp15)
tmp18 = libdevice.log1p(tmp17)
tmp19 = tl.where(tmp16, tmp15, tmp18)
tmp20 = tmp14 * tmp19
tmp22 = tmp20 + tmp21
tmp23 = libdevice.tanh(tmp22)
tmp25 = libdevice.tanh(tmp24)
tmp26 = tmp23 * tmp25
tmp27 = tmp22 + tmp26
tmp29 = tmp28 > tmp2
tmp30 = tl_math.exp(tmp28)
tmp31 = libdevice.log1p(tmp30)
tmp32 = tl.where(tmp29, tmp28, tmp31)
tmp33 = tmp27 * tmp32
tmp35 = tmp33 + tmp34
tmp36 = libdevice.tanh(tmp35)
tmp38 = libdevice.tanh(tmp37)
tmp39 = tmp36 * tmp38
tmp40 = tmp35 + tmp39
tmp42 = tmp41 > tmp2
tmp43 = tl_math.exp(tmp41)
tmp44 = libdevice.log1p(tmp43)
tmp45 = tl.where(tmp42, tmp41, tmp44)
tmp46 = tmp40 * tmp45
tmp48 = tmp46 + tmp47
tmp49 = tl.sigmoid(tmp48)
tl.store(in_out_ptr0 + x3, tmp49, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_6, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_8, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_9, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_10, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_11, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_12, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_softplus_tanh_0[grid(256)](buf1,
primals_2, primals_1, primals_3, primals_4, primals_5,
primals_6, primals_7, primals_8, primals_9, primals_10,
primals_11, primals_12, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_12
return (buf1, primals_1, primals_2, primals_3, primals_4, primals_5,
primals_6, primals_7, primals_8, primals_9, primals_10, primals_11,
buf1)
class Bitparm(nn.Module):
"""
save params
"""
def __init__(self, channel, final=False):
super(Bitparm, self).__init__()
self.final = final
self.h = nn.Parameter(torch.nn.init.normal_(torch.empty(channel).
view(1, -1, 1, 1), 0, 0.01))
self.b = nn.Parameter(torch.nn.init.normal_(torch.empty(channel).
view(1, -1, 1, 1), 0, 0.01))
if not final:
self.a = nn.Parameter(torch.nn.init.normal_(torch.empty(channel
).view(1, -1, 1, 1), 0, 0.01))
else:
self.a = None
def forward(self, x):
if self.final:
return torch.sigmoid(x * F.softplus(self.h) + self.b)
else:
x = x * F.softplus(self.h) + self.b
return x + torch.tanh(x) * torch.tanh(self.a)
class BitEstimatorNew(nn.Module):
"""
Estimate bit
"""
def __init__(self, channel):
super(BitEstimatorNew, self).__init__()
self.f1 = Bitparm(channel)
self.f2 = Bitparm(channel)
self.f3 = Bitparm(channel)
self.f4 = Bitparm(channel, True)
def forward(self, input_0):
primals_1 = self.f1.h
primals_3 = self.f1.b
primals_4 = self.f1.a
primals_5 = self.f2.h
primals_6 = self.f2.b
primals_7 = self.f2.a
primals_8 = self.f3.h
primals_9 = self.f3.b
primals_10 = self.f3.a
primals_11 = self.f4.h
primals_12 = self.f4.b
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
|
wemozj/Image-Compression-based-GMM-and-Attention-Module
|
BitEstimator
| false
| 4,527
|
[
"Apache-2.0"
] | 0
|
93f804dbcea8ffc1621456f3d104d0342c75373b
|
https://github.com/wemozj/Image-Compression-based-GMM-and-Attention-Module/tree/93f804dbcea8ffc1621456f3d104d0342c75373b
|
Prototype
|
import torch
import torch.nn
import torch.optim
import torch.utils.data
class Prototype(torch.nn.Module):
""""""
def __init__(self, prototype_num, latent_size) ->None:
super(Prototype, self).__init__()
self.latent_size = latent_size
self.prototype_num = prototype_num
self.prototypes = torch.nn.Parameter(data=torch.rand(self.
prototype_num, self.latent_size), requires_grad=True)
def forward(self, input):
""""""
x = input.view(-1, self.latent_size)
dis_2 = self.__calc_dist_2(x, self.prototypes)
return dis_2, self.prototypes
def __calc_dist_2(self, x, y):
""""""
dis_2 = torch.cdist(x, y, p=2) ** 2
return dis_2
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'prototype_num': 4, 'latent_size': 4}]
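# Editor-added usage sketch (not part of the original repo): inputs are flattened to
# (-1, latent_size), so a (4, 4, 4, 4) batch yields 64 latent vectors of size 4.
if __name__ == "__main__":
    proto = Prototype(prototype_num=4, latent_size=4)
    z = torch.rand(4, 4, 4, 4)
    dis_2, prototypes = proto(z)
    print(dis_2.shape)        # torch.Size([64, 4]) -- squared distance to each prototype
    print(prototypes.shape)   # torch.Size([4, 4])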
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__euclidean_dist_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = -2.0
tmp7 = tmp5 * tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 5, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tl.load(in_ptr0 + 4 * x1, tmp13 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tmp14 * tmp14
tmp16 = tl.load(in_ptr0 + (1 + 4 * x1), tmp13 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tl.load(in_ptr0 + (2 + 4 * x1), tmp13 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tl.load(in_ptr0 + (3 + 4 * x1), tmp13 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp13, tmp24, tmp25)
tmp27 = tmp0 >= tmp11
tl.full([1], 6, tl.int64)
tmp30 = 1.0
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp27, tmp30, tmp31)
tmp33 = tl.where(tmp13, tmp26, tmp32)
tmp34 = tl.where(tmp4, tmp9, tmp33)
tl.store(out_ptr0 + x2, tmp34, xmask)
@triton.jit
def triton_poi_fused__euclidean_dist_1(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 24
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 5, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = 1.0
tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype)
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tmp0 >= tmp7
tl.full([1], 6, tl.int64)
tmp16 = tl.load(in_ptr0 + 4 * x1, tmp13 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp17 = tmp16 * tmp16
tmp18 = tl.load(in_ptr0 + (1 + 4 * x1), tmp13 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tl.load(in_ptr0 + (2 + 4 * x1), tmp13 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = tl.load(in_ptr0 + (3 + 4 * x1), tmp13 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp25 = tmp24 * tmp24
tmp26 = tmp23 + tmp25
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp13, tmp26, tmp27)
tmp29 = tl.where(tmp9, tmp12, tmp28)
tmp30 = tl.where(tmp4, tmp5, tmp29)
tl.store(out_ptr0 + x2, tmp30, xmask)
@triton.jit
def triton_poi_fused__euclidean_dist_pow_2(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = libdevice.sqrt(tmp2)
tmp4 = tmp3 * tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 6), (6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__euclidean_dist_0[grid(384)](primals_1, buf0, 384,
XBLOCK=256, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 6), (6, 1), torch.float32)
triton_poi_fused__euclidean_dist_1[grid(24)](primals_2, buf1, 24,
XBLOCK=32, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(buf1, (6, 4), (1, 6), 0),
out=buf2)
del buf0
del buf1
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused__euclidean_dist_pow_2[grid(256)](buf2, buf3, 256,
XBLOCK=256, num_warps=4, num_stages=1)
return buf3, primals_2, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), buf2
class PrototypeNew(torch.nn.Module):
""""""
def __init__(self, prototype_num, latent_size) ->None:
super(PrototypeNew, self).__init__()
self.latent_size = latent_size
self.prototype_num = prototype_num
self.prototypes = torch.nn.Parameter(data=torch.rand(self.
prototype_num, self.latent_size), requires_grad=True)
def __calc_dist_2(self, x, y):
""""""
dis_2 = torch.cdist(x, y, p=2) ** 2
return dis_2
def forward(self, input_0):
primals_2 = self.prototypes
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0], output[1]
|
wenqiangxie/Prototype-Net
|
Prototype
| false
| 4,528
|
[
"MIT"
] | 0
|
a5ddd9976b78828d87806f9451a5092de3ff5c69
|
https://github.com/wenqiangxie/Prototype-Net/tree/a5ddd9976b78828d87806f9451a5092de3ff5c69
|
Model
|
import torch
import torch.nn as nn
import torch._C
import torch.serialization
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(2, 2, 1)
def forward(self, x):
return self.conv(x)
def get_inputs():
return [torch.rand([4, 2, 64, 64])]
def get_init_inputs():
return [[], {}]
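# Editor-added usage sketch (not part of the original repo): the module is a single
# 1x1 convolution over 2 channels, so channel count and spatial size are preserved.
if __name__ == "__main__":
    m = Model()
    y = m(torch.rand(4, 2, 64, 64))
    print(y.shape)            # torch.Size([4, 2, 64, 64])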
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch._C
import torch.serialization
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 2
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (2, 2, 1, 1), (2, 1, 1, 1))
assert_size_stride(primals_2, (2,), (1,))
assert_size_stride(primals_3, (4, 2, 64, 64), (8192, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 2, 64, 64), (8192, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(32768)](buf1, primals_2, 32768,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3
class ModelNew(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(2, 2, 1)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
whu-pzhang/mmsegmentation
|
Model
| false
| 4,529
|
[
"Apache-2.0"
] | 0
|
46326f63ce411c794d237e986dd3924590d0e75e
|
https://github.com/whu-pzhang/mmsegmentation/tree/46326f63ce411c794d237e986dd3924590d0e75e
|
MultiHeadAttention
|
import torch
from torch import Tensor
from typing import Any
import torch.nn.functional as F
from torch import nn
def ifnone(a: 'Any', b: 'Any') ->Any:
"""`a` if `a` is not None, otherwise `b`."""
return b if a is None else a
class MultiHeadAttention(nn.Module):
"""MutiHeadAttention."""
def __init__(self, n_heads: 'int', d_model: 'int', d_head: 'int'=None,
resid_p: 'float'=0.0, attn_p: 'float'=0.0, bias: 'bool'=True, scale:
'bool'=True):
super().__init__()
d_head = ifnone(d_head, d_model // n_heads)
self.n_heads, self.d_head, self.scale = n_heads, d_head, scale
self.attention = nn.Linear(d_model, 3 * n_heads * d_head, bias=bias)
self.out = nn.Linear(n_heads * d_head, d_model, bias=bias)
self.drop_att, self.drop_res = nn.Dropout(attn_p), nn.Dropout(resid_p)
self.ln = nn.LayerNorm(d_model)
def forward(self, x: 'Tensor', mask: 'Tensor'=None, **kwargs):
return self.ln(x + self.drop_res(self.out(self._apply_attention(x,
mask=mask, **kwargs))))
def _apply_attention(self, x: 'Tensor', mask: 'Tensor'=None):
bs, x_len = x.size(0), x.size(1)
wq, wk, wv = torch.chunk(self.attention(x), 3, dim=-1)
wq, wk, wv = map(lambda x: x.view(bs, x_len, self.n_heads, self.
d_head), (wq, wk, wv))
wq, wk, wv = wq.permute(0, 2, 1, 3), wk.permute(0, 2, 3, 1
), wv.permute(0, 2, 1, 3)
attn_score = torch.matmul(wq, wk)
if self.scale:
attn_score.div_(self.d_head ** 0.5)
if mask is not None:
attn_score = attn_score.float().masked_fill(mask, -float('inf')
).type_as(attn_score)
attn_prob = self.drop_att(F.softmax(attn_score, dim=-1))
attn_vec = torch.matmul(attn_prob, wv)
        return attn_vec.permute(0, 2, 1, 3).contiguous().view(bs,
x_len, -1)
def _attention_einsum(self, x, mask=None):
bs, x_len = x.size(0), x.size(1)
wq, wk, wv = torch.chunk(self.attention(x), 3, dim=-1)
wq, wk, wv = map(lambda x: x.view(bs, x_len, self.n_heads, self.
d_head), (wq, wk, wv))
attn_score = torch.einsum('bind,bjnd->bijn', (wq, wk))
if self.scale:
attn_score.mul_(1 / self.d_head ** 0.5)
if mask is not None:
attn_score = attn_score.float().masked_fill(mask, -float('inf')
).type_as(attn_score)
attn_prob = self.drop_att(F.softmax(attn_score, dim=2))
attn_vec = torch.einsum('bijn,bjnd->bind', (attn_prob, wv))
return attn_vec.contiguous().view(bs, x_len, -1)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_heads': 4, 'd_model': 4}]
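# Editor-added usage sketch (not part of the original repo): with d_model=4 and
# n_heads=4 the per-head size d_head defaults to d_model // n_heads = 1, and the
# output keeps the (batch, seq_len, d_model) shape because of the residual + LayerNorm.
if __name__ == "__main__":
    mha = MultiHeadAttention(n_heads=4, d_model=4)
    x = torch.rand(4, 4, 4)      # (batch, seq_len, d_model)
    y = mha(x)                   # mask=None, so no positions are masked
    print(y.shape)               # torch.Size([4, 4, 4])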
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import Tensor
from typing import Any
import torch.nn.functional as F
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (4 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4 + y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tl_math.exp(tmp14)
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (8 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (8 + y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 12), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, buf1, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
triton_poi_fused_clone_1[grid(16, 4)](buf0, primals_3, buf2, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf2, (16, 1, 4), (4, 0, 1), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
triton_poi_fused__softmax_3[grid(256)](buf4, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf4
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_4[grid(16, 4)](buf0, primals_3, buf6, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del buf0
del primals_3
buf7 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 0), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_5[grid(16, 4)](buf7, buf8, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0)
del buf7
extern_kernels.addmm(primals_5, reinterpret_tensor(buf8, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf9)
del primals_5
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_6[grid(16)](primals_1, buf9,
buf10, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_7[grid(64)](primals_1, buf9,
buf10, buf11, primals_6, primals_7, buf12, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf10
del buf11
del primals_7
return buf12, primals_1, primals_6, buf5, reinterpret_tensor(buf8, (16,
4), (4, 1), 0), buf9, primals_4, reinterpret_tensor(buf6, (16, 1, 4
), (4, 1, 1), 0), reinterpret_tensor(buf1, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 4), 0)
def ifnone(a: 'Any', b: 'Any') ->Any:
"""`a` if `a` is not None, otherwise `b`."""
return b if a is None else a
class MultiHeadAttentionNew(nn.Module):
"""MutiHeadAttention."""
def __init__(self, n_heads: 'int', d_model: 'int', d_head: 'int'=None,
resid_p: 'float'=0.0, attn_p: 'float'=0.0, bias: 'bool'=True, scale:
'bool'=True):
super().__init__()
d_head = ifnone(d_head, d_model // n_heads)
self.n_heads, self.d_head, self.scale = n_heads, d_head, scale
self.attention = nn.Linear(d_model, 3 * n_heads * d_head, bias=bias)
self.out = nn.Linear(n_heads * d_head, d_model, bias=bias)
self.drop_att, self.drop_res = nn.Dropout(attn_p), nn.Dropout(resid_p)
self.ln = nn.LayerNorm(d_model)
def _apply_attention(self, x: 'Tensor', mask: 'Tensor'=None):
bs, x_len = x.size(0), x.size(1)
wq, wk, wv = torch.chunk(self.attention(x), 3, dim=-1)
wq, wk, wv = map(lambda x: x.view(bs, x_len, self.n_heads, self.
d_head), (wq, wk, wv))
wq, wk, wv = wq.permute(0, 2, 1, 3), wk.permute(0, 2, 3, 1
), wv.permute(0, 2, 1, 3)
attn_score = torch.matmul(wq, wk)
if self.scale:
attn_score.div_(self.d_head ** 0.5)
if mask is not None:
attn_score = attn_score.float().masked_fill(mask, -float('inf')
).type_as(attn_score)
attn_prob = self.drop_att(F.softmax(attn_score, dim=-1))
attn_vec = torch.matmul(attn_prob, wv)
        return attn_vec.permute(0, 2, 1, 3).contiguous().view(bs,
x_len, -1)
def _attention_einsum(self, x, mask=None):
bs, x_len = x.size(0), x.size(1)
wq, wk, wv = torch.chunk(self.attention(x), 3, dim=-1)
wq, wk, wv = map(lambda x: x.view(bs, x_len, self.n_heads, self.
d_head), (wq, wk, wv))
attn_score = torch.einsum('bind,bjnd->bijn', (wq, wk))
if self.scale:
attn_score.mul_(1 / self.d_head ** 0.5)
if mask is not None:
attn_score = attn_score.float().masked_fill(mask, -float('inf')
).type_as(attn_score)
attn_prob = self.drop_att(F.softmax(attn_score, dim=2))
attn_vec = torch.einsum('bijn,bjnd->bind', (attn_prob, wv))
return attn_vec.contiguous().view(bs, x_len, -1)
def forward(self, input_0):
primals_2 = self.attention.weight
primals_3 = self.attention.bias
primals_4 = self.out.weight
primals_5 = self.out.bias
primals_6 = self.ln.weight
primals_7 = self.ln.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
wegroupwolves/fastai
|
MultiHeadAttention
| false
| 4,530
|
[
"Apache-2.0"
] | 0
|
df40df403e05e132411f0f7abc7ec33c86e58bb9
|
https://github.com/wegroupwolves/fastai/tree/df40df403e05e132411f0f7abc7ec33c86e58bb9
|
GDN
|
from torch.autograd import Function
import torch
import torch.nn as nn
import torch.utils.data
class LowerBound(Function):
@staticmethod
def forward(ctx, inputs, bound):
b = torch.ones_like(inputs) * bound
ctx.save_for_backward(inputs, b)
return torch.max(inputs, b)
@staticmethod
def backward(ctx, grad_output):
inputs, b = ctx.saved_tensors
pass_through_1 = inputs >= b
pass_through_2 = grad_output < 0
pass_through = pass_through_1 | pass_through_2
return pass_through.type(grad_output.dtype) * grad_output, None
class GDN(nn.Module):
"""Generalized divisive normalization layer.
    y[i] = x[i] / sqrt(beta[i] + sum_j(gamma[j, i] * x[j]^2))
"""
def __init__(self, ch, inverse=False, beta_min=1e-06, gamma_init=0.1,
reparam_offset=2 ** -18):
super(GDN, self).__init__()
self.inverse = inverse
self.beta_min = beta_min
self.gamma_init = gamma_init
self.reparam_offset = reparam_offset
self.build(ch)
def build(self, ch):
self.pedestal = self.reparam_offset ** 2
self.beta_bound = (self.beta_min + self.reparam_offset ** 2) ** 0.5
self.gamma_bound = self.reparam_offset
beta = torch.sqrt(torch.ones(ch) + self.pedestal)
self.beta = nn.Parameter(beta)
eye = torch.eye(ch)
g = self.gamma_init * eye
g = g + self.pedestal
gamma = torch.sqrt(g)
self.gamma = nn.Parameter(gamma)
self.pedestal = self.pedestal
def forward(self, inputs):
unfold = False
if inputs.dim() == 5:
unfold = True
bs, ch, d, w, h = inputs.size()
inputs = inputs.view(bs, ch, d * w, h)
_, ch, _, _ = inputs.size()
beta = LowerBound.apply(self.beta, self.beta_bound)
beta = beta ** 2 - self.pedestal
gamma = LowerBound.apply(self.gamma, self.gamma_bound)
gamma = gamma ** 2 - self.pedestal
gamma = gamma.view(ch, ch, 1, 1)
norm_ = nn.functional.conv2d(inputs ** 2, gamma, beta)
norm_ = torch.sqrt(norm_)
if self.inverse:
outputs = inputs * norm_
else:
outputs = inputs / norm_
if unfold:
outputs = outputs.view(bs, ch, d, w, h)
return outputs
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'ch': 4}]
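# Editor-added usage sketch (not part of the original repo): with the default
# inverse=False the input is divided by the learned norm; inverse=True would
# multiply by it instead. Output shape matches the input shape.
if __name__ == "__main__":
    gdn = GDN(ch=4)
    x = torch.rand(4, 4, 4, 4)
    y = gdn(x)
    print(y.shape)            # torch.Size([4, 4, 4, 4])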
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch.autograd import Function
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_maximum_mul_pow_sub_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 3.814697265625e-06
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = tmp2 * tmp2
tmp4 = 1.4551915228366852e-11
tmp5 = tmp3 - tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_pow_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 * tmp0
tl.store(out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_poi_fused_convolution_maximum_mul_pow_sub_2(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0010000072652474046
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = tmp2 * tmp2
tmp4 = 1.4551915228366852e-11
tmp5 = tmp3 - tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_convolution_div_maximum_mul_pow_sqrt_sub_3(in_out_ptr0,
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp4 = libdevice.sqrt(tmp2)
tmp5 = tmp3 / tmp4
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp5, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_maximum_mul_pow_sub_0[grid(16)](primals_3, buf0,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_pow_1[grid(256)](primals_1, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_convolution_maximum_mul_pow_sub_2[grid(4)](primals_2,
buf2, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf3 = extern_kernels.convolution(buf1, reinterpret_tensor(buf0, (4,
4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0),
dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3
del buf3
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_convolution_div_maximum_mul_pow_sqrt_sub_3[grid(256)](
buf4, buf2, primals_1, buf5, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf2
return buf5, primals_1, primals_2, primals_3, reinterpret_tensor(buf0,
(4, 4, 1, 1), (4, 1, 1, 1), 0), buf1, buf4
class LowerBound(Function):
@staticmethod
def forward(ctx, inputs, bound):
b = torch.ones_like(inputs) * bound
ctx.save_for_backward(inputs, b)
return torch.max(inputs, b)
@staticmethod
def backward(ctx, grad_output):
inputs, b = ctx.saved_tensors
pass_through_1 = inputs >= b
pass_through_2 = grad_output < 0
pass_through = pass_through_1 | pass_through_2
return pass_through.type(grad_output.dtype) * grad_output, None
class GDNNew(nn.Module):
"""Generalized divisive normalization layer.
    y[i] = x[i] / sqrt(beta[i] + sum_j(gamma[j, i] * x[j]^2))
"""
def __init__(self, ch, inverse=False, beta_min=1e-06, gamma_init=0.1,
reparam_offset=2 ** -18):
super(GDNNew, self).__init__()
self.inverse = inverse
self.beta_min = beta_min
self.gamma_init = gamma_init
self.reparam_offset = reparam_offset
self.build(ch)
def build(self, ch):
self.pedestal = self.reparam_offset ** 2
self.beta_bound = (self.beta_min + self.reparam_offset ** 2) ** 0.5
self.gamma_bound = self.reparam_offset
beta = torch.sqrt(torch.ones(ch) + self.pedestal)
self.beta = nn.Parameter(beta)
eye = torch.eye(ch)
g = self.gamma_init * eye
g = g + self.pedestal
gamma = torch.sqrt(g)
self.gamma = nn.Parameter(gamma)
self.pedestal = self.pedestal
def forward(self, input_0):
primals_2 = self.beta
primals_3 = self.gamma
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
wemozj/Image-Compression-based-GMM-and-Attention-Module
|
GDN
| false
| 4,531
|
[
"Apache-2.0"
] | 0
|
93f804dbcea8ffc1621456f3d104d0342c75373b
|
https://github.com/wemozj/Image-Compression-based-GMM-and-Attention-Module/tree/93f804dbcea8ffc1621456f3d104d0342c75373b
|
UFOAttention
|
import torch
from torch import nn
from torch.nn import init
def XNorm(x, gamma):
norm_tensor = torch.norm(x, 2, -1, True)
return x * gamma / norm_tensor
class UFOAttention(nn.Module):
"""
Scaled dot-product attention
"""
def __init__(self, d_model, d_k, d_v, h, dropout=0.1):
"""
:param d_model: Output dimensionality of the model
:param d_k: Dimensionality of queries and keys
:param d_v: Dimensionality of values
:param h: Number of heads
"""
super(UFOAttention, self).__init__()
self.fc_q = nn.Linear(d_model, h * d_k)
self.fc_k = nn.Linear(d_model, h * d_k)
self.fc_v = nn.Linear(d_model, h * d_v)
self.fc_o = nn.Linear(h * d_v, d_model)
self.dropout = nn.Dropout(dropout)
self.gamma = nn.Parameter(torch.randn((1, h, 1, 1)))
self.d_model = d_model
self.d_k = d_k
self.d_v = d_v
self.h = h
self.init_weights()
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.001)
if m.bias is not None:
init.constant_(m.bias, 0)
def forward(self, queries, keys, values):
b_s, nq = queries.shape[:2]
nk = keys.shape[1]
q = self.fc_q(queries).view(b_s, nq, self.h, self.d_k).permute(0, 2,
1, 3)
k = self.fc_k(keys).view(b_s, nk, self.h, self.d_k).permute(0, 2, 3, 1)
v = self.fc_v(values).view(b_s, nk, self.h, self.d_v).permute(0, 2,
1, 3)
kv = torch.matmul(k, v)
kv_norm = XNorm(kv, self.gamma)
q_norm = XNorm(q, self.gamma)
out = torch.matmul(q_norm, kv_norm).permute(0, 2, 1, 3).contiguous(
).view(b_s, nq, self.h * self.d_v)
out = self.fc_o(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'d_model': 4, 'd_k': 4, 'd_v': 4, 'h': 4}]
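# Editor-added usage sketch (not part of the original repo): queries, keys and values
# share the shape (batch, seq_len, d_model); XNorm replaces the usual softmax, and
# fc_o folds the h * d_v head dimension back to d_model.
if __name__ == "__main__":
    ufo = UFOAttention(d_model=4, d_k=4, d_v=4, h=4)
    q = k = v = torch.rand(4, 4, 4)
    out = ufo(q, k, v)
    print(out.shape)          # torch.Size([4, 4, 4])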
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
from torch.nn import init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x4, tmp2, xmask)
@triton.jit
def triton_poi_fused_div_linalg_vector_norm_mul_2(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x2 = xindex // 16 % 4
x5 = xindex // 4
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + 4 * x5, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x5), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x5), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x5), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = libdevice.sqrt(tmp13)
tmp15 = tmp2 / tmp14
tl.store(out_ptr0 + x4, tmp15, xmask)
@triton.jit
def triton_poi_fused_clone_div_linalg_vector_norm_mul_3(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = xindex // 4 % 4
x5 = xindex // 4
x0 = xindex % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + 4 * x5, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x5), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x5), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x5), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = libdevice.sqrt(tmp13)
tmp15 = tmp2 / tmp14
tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp15, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (16, 4), (4, 1))
assert_size_stride(primals_4, (16,), (1,))
assert_size_stride(primals_5, (16, 4), (4, 1))
assert_size_stride(primals_6, (16,), (1,))
assert_size_stride(primals_7, (16, 4), (4, 1))
assert_size_stride(primals_8, (16,), (1,))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_11, (4, 16), (16, 1))
assert_size_stride(primals_12, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(primals_1, (16,
4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 16), (1, 4),
0), alpha=1, beta=1, out=buf0)
del primals_3
del primals_4
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf1)
del primals_5
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 16), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64, 4)](buf1, primals_6, buf3, 64, 4,
XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused_clone_1[grid(256)](buf2, primals_8, buf4, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_8
buf5 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_div_linalg_vector_norm_mul_2[grid(256)](buf5,
primals_10, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_div_linalg_vector_norm_mul_3[grid(256)](buf0,
primals_10, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), out=buf8)
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(256)](buf8, buf9, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf8
buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_12, reinterpret_tensor(buf9, (16, 16),
(16, 1), 0), reinterpret_tensor(primals_11, (16, 4), (1, 16), 0
), alpha=1, beta=1, out=buf10)
del primals_12
return reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0
), primals_10, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), buf0, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0
), buf5, buf6, reinterpret_tensor(buf9, (16, 16), (16, 1), 0
), primals_11, reinterpret_tensor(buf7, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0)
def XNorm(x, gamma):
norm_tensor = torch.norm(x, 2, -1, True)
return x * gamma / norm_tensor
class UFOAttentionNew(nn.Module):
"""
Scaled dot-product attention
"""
def __init__(self, d_model, d_k, d_v, h, dropout=0.1):
"""
:param d_model: Output dimensionality of the model
:param d_k: Dimensionality of queries and keys
:param d_v: Dimensionality of values
:param h: Number of heads
"""
super(UFOAttentionNew, self).__init__()
self.fc_q = nn.Linear(d_model, h * d_k)
self.fc_k = nn.Linear(d_model, h * d_k)
self.fc_v = nn.Linear(d_model, h * d_v)
self.fc_o = nn.Linear(h * d_v, d_model)
self.dropout = nn.Dropout(dropout)
self.gamma = nn.Parameter(torch.randn((1, h, 1, 1)))
self.d_model = d_model
self.d_k = d_k
self.d_v = d_v
self.h = h
self.init_weights()
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.001)
if m.bias is not None:
init.constant_(m.bias, 0)
def forward(self, input_0, input_1, input_2):
primals_10 = self.gamma
primals_3 = self.fc_q.weight
primals_4 = self.fc_q.bias
primals_5 = self.fc_k.weight
primals_6 = self.fc_k.bias
primals_7 = self.fc_v.weight
primals_8 = self.fc_v.bias
primals_11 = self.fc_o.weight
primals_12 = self.fc_o.bias
primals_1 = input_0
primals_2 = input_1
primals_9 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
|
weihaoxie/External-Attention-pytorch
|
UFOAttention
| false
| 4,532
|
[
"MIT"
] | 0
|
9bec70f4ed8dd858c815e9bad240ab2f95a91a9f
|
https://github.com/weihaoxie/External-Attention-pytorch/tree/9bec70f4ed8dd858c815e9bad240ab2f95a91a9f
|
NaiveGate
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class NaiveGate(nn.Module):
"""
A naive gate implementation that defines the standard behavior of the gate
which determines which experts the tokens are going to.
Both the indecies and the score, or confidence, are output to the parent
module.
The load-balance strategies are also designed to be implemented within the
`Gate` module.
"""
def __init__(self, d_model, num_expert, world_size, top_k=2):
super().__init__()
self.gate = nn.Linear(d_model, num_expert * world_size)
self.top_k = top_k
def forward(self, inp):
"""
The naive implementation simply calculates the top-k of a linear layer's
output.
"""
gate = self.gate(inp)
gate_top_k_val, gate_top_k_idx = torch.topk(gate, k=self.top_k, dim
=-1, largest=True, sorted=False)
gate_top_k_val = gate_top_k_val.view(-1, self.top_k)
gate_score = F.softmax(gate_top_k_val, dim=-1).unsqueeze(1)
gate_top_k_idx = gate_top_k_idx.view(-1)
return gate_top_k_idx, gate_score
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'num_expert': 4, 'world_size': 4}]
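# Editor-added usage sketch (not part of the original repo): with num_expert=4 and
# world_size=4 the gate scores 16 experts per token; a (4, 4, 4, 4) input holds
# 4 * 4 * 4 = 64 tokens, each routed to top_k=2 experts.
if __name__ == "__main__":
    gate = NaiveGate(d_model=4, num_expert=4, world_size=4)
    idx, score = gate(torch.rand(4, 4, 4, 4))
    print(idx.shape)          # torch.Size([128]) -- 64 tokens * top_k expert indices
    print(score.shape)        # torch.Size([64, 1, 2]) -- softmax over the top-k scores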
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 2
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp5 / tmp10
tl.store(out_ptr0 + x2, tmp11, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (16, 4), (4, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4),
0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = torch.ops.aten.topk.default(reinterpret_tensor(buf0, (4, 4,
4, 16), (256, 64, 16, 1), 0), 2, -1, True, False)
del buf0
buf2 = buf1[0]
buf3 = buf1[1]
del buf1
buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(128)](buf2, buf4, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del buf2
return reinterpret_tensor(buf3, (128,), (1,), 0), reinterpret_tensor(buf4,
(64, 1, 2), (2, 2, 1), 0), reinterpret_tensor(primals_3, (64, 4), (
4, 1), 0), buf3, buf4
class NaiveGateNew(nn.Module):
"""
A naive gate implementation that defines the standard behavior of the gate
which determines which experts the tokens are going to.
    Both the indices and the score, or confidence, are output to the parent
module.
The load-balance strategies are also designed to be implemented within the
`Gate` module.
"""
def __init__(self, d_model, num_expert, world_size, top_k=2):
super().__init__()
self.gate = nn.Linear(d_model, num_expert * world_size)
self.top_k = top_k
def forward(self, input_0):
primals_1 = self.gate.weight
primals_2 = self.gate.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0], output[1]
|
whn09/fastmoe
|
NaiveGate
| false
| 4,533
|
[
"Apache-2.0"
] | 0
|
d0ffaffc6431abcd3ea6d0287dbf09f8cd727a0a
|
https://github.com/whn09/fastmoe/tree/d0ffaffc6431abcd3ea6d0287dbf09f8cd727a0a
|
ActorNetwork
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ActorNetwork(nn.Module):
def __init__(self, input_size, hidden_size, action_size):
super(ActorNetwork, self).__init__()
self.fc1 = nn.Linear(input_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.fc3 = nn.Linear(hidden_size, action_size)
def forward(self, x):
out = F.relu(self.fc1(x))
out = F.relu(self.fc2(out))
out = F.log_softmax(self.fc3(out))
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'action_size': 4}]
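# A minimal usage sketch, not part of the original repo: the actor outputs
# per-action log-probabilities, so an action can be drawn from a Categorical
# distribution built on top of them. Names below are illustrative only.
def _actor_demo():
    from torch.distributions import Categorical
    torch.manual_seed(0)
    actor = ActorNetwork(input_size=4, hidden_size=4, action_size=4)
    log_probs = actor(torch.rand([1, 4]))          # shape [1, action_size]
    action = Categorical(logits=log_probs).sample()
    assert log_probs.shape == (1, 4) and action.shape == (1,)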
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
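# The two kernels above implement log-softmax over dim 1 in two passes: the
# first subtracts the per-position maximum of the four logits (offsets 16
# apart), the second subtracts the log of the summed exponentials of the
# shifted values.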
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
primals_5, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused__log_softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del buf5
return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (64, 4), (4, 1), 0), buf6, primals_6, buf7, primals_4, buf8
class ActorNetworkNew(nn.Module):
def __init__(self, input_size, hidden_size, action_size):
super(ActorNetworkNew, self).__init__()
self.fc1 = nn.Linear(input_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.fc3 = nn.Linear(hidden_size, action_size)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
whongyu/MA3C
|
ActorNetwork
| false
| 4,534
|
[
"MIT"
] | 0
|
d3b38cf42a909c0938624ba853119804efaf47eb
|
https://github.com/whongyu/MA3C/tree/d3b38cf42a909c0938624ba853119804efaf47eb
|
ComplexBatchNormalize
|
import torch
import torch.nn as nn
def cylindricalToPolarConversion(input1, input2=None):
if input2 is None:
"""input1 is tensor of [B,C,H,W,D,2] contains both real and imaginary channels
in the last dims"""
ndims = input1.ndimension()
real_input = input1.narrow(ndims - 1, 0, 1).squeeze(ndims - 1)
imag_input = input1.narrow(ndims - 1, 1, 1).squeeze(ndims - 1)
mag = (real_input ** 2 + imag_input ** 2) ** 0.5
phase = torch.atan2(imag_input, real_input)
phase[phase.ne(phase)] = 0.0
return torch.stack((mag, phase), dim=input1.ndimension() - 1)
else:
"""input1 is real part and input2 is imaginary part; both of size [B,C,H,W,D]"""
mag = (input1 ** 2 + input2 ** 2) ** 0.5
phase = torch.atan2(input2, input1)
phase[phase.ne(phase)] = 0.0
return mag, phase
def polarToCylindricalConversion(input1, input2=None):
if input2 is None:
"""input1 is tensor of [B,C,H,W,D,2] contains both magnitude and phase channels
in the last dims"""
ndims = input1.ndimension()
mag_input = input1.narrow(ndims - 1, 0, 1).squeeze(ndims - 1)
phase_input = input1.narrow(ndims - 1, 1, 1).squeeze(ndims - 1)
real = mag_input * torch.cos(phase_input)
imag = mag_input * torch.sin(phase_input)
return torch.stack((real, imag), dim=input1.ndimension() - 1)
else:
"""input1 is magnitude part and input2 is phase part; both of size [B,C,H,W,D]"""
real = input1 * torch.cos(input2)
imag = input1 * torch.sin(input2)
return real, imag
def normalizeComplexBatch_byMagnitudeOnly(x, polar=False):
""" normalize the complex batch by making the magnitude of mean 1 and std 1, and keep the phase as it is"""
ndims = x.ndimension()
shift_mean = 1
if not polar:
x = cylindricalToPolarConversion(x)
if ndims == 4:
mag = x[:, :, :, 0]
mdims = mag.ndimension()
mag_shaped = mag.reshape((mag.shape[0], mag.shape[1], mag.shape[2]))
normalized_mag = (mag - torch.mean(mag_shaped, mdims - 1, keepdim=
True).unsqueeze(mdims)) / torch.std(mag_shaped, mdims - 1,
keepdim=True).unsqueeze(mdims) + shift_mean
        x = torch.stack([normalized_mag, x[:, :, :, 1]], dim=3)
elif ndims == 5:
mag = x[:, :, :, :, 0]
mdims = mag.ndimension()
mag_shaped = mag.reshape((mag.shape[0], mag.shape[1], mag.shape[2] *
mag.shape[3]))
normalized_mag = (mag - torch.mean(mag_shaped, mdims - 2, keepdim=
True).unsqueeze(mdims - 1)) / torch.std(mag_shaped, mdims - 2,
keepdim=True).unsqueeze(mdims - 1) + shift_mean
x = torch.stack([normalized_mag, x[:, :, :, :, 1]], dim=4)
elif ndims == 6:
mag = x[:, :, :, :, :, 0]
mdims = mag.ndimension()
mag_shaped = mag.reshape((mag.shape[0], mag.shape[1], mag.shape[2] *
mag.shape[3] * mag.shape[4]))
normalized_mag = (mag - torch.mean(mag_shaped, mdims - 3, keepdim=
True).unsqueeze(mdims - 2)) / torch.std(mag_shaped, mdims - 3,
keepdim=True).unsqueeze(mdims - 2) + shift_mean
x = torch.stack([normalized_mag, x[:, :, :, :, :, 1]], dim=5)
x[x.ne(x)] = 0.0
if not polar:
x = polarToCylindricalConversion(x)
return x
class ComplexBatchNormalize(nn.Module):
def __init__(self):
super(ComplexBatchNormalize, self).__init__()
def forward(self, input):
return normalizeComplexBatch_byMagnitudeOnly(input)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
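# A minimal sanity sketch, not part of the original repo: it assumes only the
# two conversion helpers defined above and checks that they are inverses of
# each other when the last dim holds the (real, imag) pair.
def _polar_roundtrip_demo():
    torch.manual_seed(0)
    x = torch.rand([4, 4, 2])                      # last dim = (real, imag)
    back = polarToCylindricalConversion(cylindricalToPolarConversion(x))
    assert back.shape == x.shape
    assert torch.allclose(x, back, atol=1e-6)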
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_atan2_index_put_lift_fresh_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = libdevice.atan2(tmp0, tmp1)
tmp3 = tmp2 != tmp2
tmp4 = 0.0
tmp5 = tl.where(tmp3, tmp4, tmp2)
tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_stack_1(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + 4 * x1, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp5 * tmp5
tmp7 = tl.load(in_ptr0 + (1 + 4 * x1), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp10 = libdevice.sqrt(tmp9)
tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype)
tmp12 = tl.where(tmp4, tmp10, tmp11)
tmp13 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp16 = tl.load(in_ptr1 + x1, tmp13 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp17 = tl.where(tmp4, tmp12, tmp16)
tmp18 = tmp17 != tmp17
tmp19 = 0.0
tmp20 = tl.where(tmp18, tmp19, tmp17)
tl.store(out_ptr0 + x2, tmp20, xmask)
@triton.jit
def triton_poi_fused_stack_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + 2 * x1, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (1 + 2 * x1), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tl_math.cos(tmp6)
tmp8 = tmp5 * tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp14 = tl.load(in_ptr0 + 2 * x1, tmp11 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tl.load(in_ptr0 + (1 + 2 * x1), tmp11 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp16 = tl_math.sin(tmp15)
tmp17 = tmp14 * tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp11, tmp17, tmp18)
tmp20 = tl.where(tmp4, tmp10, tmp19)
tl.store(out_ptr0 + x2, tmp20, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_poi_fused_atan2_index_put_lift_fresh_0[grid(4)](arg0_1, buf0,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
triton_poi_fused_index_put_lift_fresh_stack_1[grid(8)](arg0_1, buf0,
buf1, 8, XBLOCK=8, num_warps=1, num_stages=1)
del arg0_1
del buf0
buf2 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
triton_poi_fused_stack_2[grid(8)](buf1, buf2, 8, XBLOCK=8,
num_warps=1, num_stages=1)
del buf1
return buf2,
def cylindricalToPolarConversion(input1, input2=None):
if input2 is None:
"""input1 is tensor of [B,C,H,W,D,2] contains both real and imaginary channels
in the last dims"""
ndims = input1.ndimension()
real_input = input1.narrow(ndims - 1, 0, 1).squeeze(ndims - 1)
imag_input = input1.narrow(ndims - 1, 1, 1).squeeze(ndims - 1)
mag = (real_input ** 2 + imag_input ** 2) ** 0.5
phase = torch.atan2(imag_input, real_input)
phase[phase.ne(phase)] = 0.0
return torch.stack((mag, phase), dim=input1.ndimension() - 1)
else:
"""input1 is real part and input2 is imaginary part; both of size [B,C,H,W,D]"""
mag = (input1 ** 2 + input2 ** 2) ** 0.5
phase = torch.atan2(input2, input1)
phase[phase.ne(phase)] = 0.0
return mag, phase
def polarToCylindricalConversion(input1, input2=None):
if input2 is None:
"""input1 is tensor of [B,C,H,W,D,2] contains both magnitude and phase channels
in the last dims"""
ndims = input1.ndimension()
mag_input = input1.narrow(ndims - 1, 0, 1).squeeze(ndims - 1)
phase_input = input1.narrow(ndims - 1, 1, 1).squeeze(ndims - 1)
real = mag_input * torch.cos(phase_input)
imag = mag_input * torch.sin(phase_input)
return torch.stack((real, imag), dim=input1.ndimension() - 1)
else:
"""input1 is magnitude part and input2 is phase part; both of size [B,C,H,W,D]"""
real = input1 * torch.cos(input2)
imag = input1 * torch.sin(input2)
return real, imag
def normalizeComplexBatch_byMagnitudeOnly(x, polar=False):
""" normalize the complex batch by making the magnitude of mean 1 and std 1, and keep the phase as it is"""
ndims = x.ndimension()
shift_mean = 1
if not polar:
x = cylindricalToPolarConversion(x)
if ndims == 4:
mag = x[:, :, :, 0]
mdims = mag.ndimension()
mag_shaped = mag.reshape((mag.shape[0], mag.shape[1], mag.shape[2]))
normalized_mag = (mag - torch.mean(mag_shaped, mdims - 1, keepdim=
True).unsqueeze(mdims)) / torch.std(mag_shaped, mdims - 1,
keepdim=True).unsqueeze(mdims) + shift_mean
        x = torch.stack([normalized_mag, x[:, :, :, 1]], dim=3)
elif ndims == 5:
mag = x[:, :, :, :, 0]
mdims = mag.ndimension()
mag_shaped = mag.reshape((mag.shape[0], mag.shape[1], mag.shape[2] *
mag.shape[3]))
normalized_mag = (mag - torch.mean(mag_shaped, mdims - 2, keepdim=
True).unsqueeze(mdims - 1)) / torch.std(mag_shaped, mdims - 2,
keepdim=True).unsqueeze(mdims - 1) + shift_mean
x = torch.stack([normalized_mag, x[:, :, :, :, 1]], dim=4)
elif ndims == 6:
mag = x[:, :, :, :, :, 0]
mdims = mag.ndimension()
mag_shaped = mag.reshape((mag.shape[0], mag.shape[1], mag.shape[2] *
mag.shape[3] * mag.shape[4]))
normalized_mag = (mag - torch.mean(mag_shaped, mdims - 3, keepdim=
True).unsqueeze(mdims - 2)) / torch.std(mag_shaped, mdims - 3,
keepdim=True).unsqueeze(mdims - 2) + shift_mean
x = torch.stack([normalized_mag, x[:, :, :, :, :, 1]], dim=5)
x[x.ne(x)] = 0.0
if not polar:
x = polarToCylindricalConversion(x)
return x
class ComplexBatchNormalizeNew(nn.Module):
def __init__(self):
super(ComplexBatchNormalizeNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
wizofe/urus-mri-recon
|
ComplexBatchNormalize
| false
| 4,535
|
[
"MIT"
] | 0
|
eab8e48dca31d2b936ce69ccc251ec5a4a10facc
|
https://github.com/wizofe/urus-mri-recon/tree/eab8e48dca31d2b936ce69ccc251ec5a4a10facc
|
Channel_mean
|
import torch
import torch.nn as nn
class Channel_mean(nn.Module):
def __init__(self) ->None:
super().__init__()
def forward(self, V):
"""
only V[0]
"""
return torch.sum(V[0], dim=0).squeeze()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
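# A minimal sketch, not part of the original repo, making the terse "only V[0]"
# docstring concrete: only the first slice along dim 0 is used, and it is
# summed over its own leading dim.
def _channel_mean_demo():
    v = torch.rand([4, 4, 4, 4])
    out = Channel_mean()(v)
    assert out.shape == (4, 4)
    assert torch.allclose(out, v[0].sum(dim=0))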
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_squeeze_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_squeeze_sum_0[grid(16)](arg0_1, buf0, 16, XBLOCK=
16, num_warps=1, num_stages=1)
del arg0_1
return buf0,
class Channel_meanNew(nn.Module):
def __init__(self) ->None:
super().__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
wk989898/ARES-implement
|
Channel_mean
| false
| 4,536
|
[
"MIT"
] | 0
|
b2411be01124feaccbc89d74f6025fbfa584bb3f
|
https://github.com/wk989898/ARES-implement/tree/b2411be01124feaccbc89d74f6025fbfa584bb3f
|
MnistFeatureExtractor
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class MnistFeatureExtractor(nn.Module):
def __init__(self, activation=F.leaky_relu):
super(MnistFeatureExtractor, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.activation = activation
def get_mtx(self):
return None
def forward(self, x):
x = self.activation(F.max_pool2d(self.conv1(x), 2))
x = self.activation(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
return x.view(x.size(0), -1)
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
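# A minimal shape sketch, not part of the original repo, tracing the spatial
# sizes: 64 -> conv5 -> 60 -> maxpool2 -> 30 -> conv5 -> 26 -> maxpool2 -> 13,
# so the flattened feature vector has 20 * 13 * 13 = 3380 dimensions.
def _feature_size_demo():
    torch.manual_seed(0)
    fx = MnistFeatureExtractor().eval()            # eval() turns off Dropout2d
    feats = fx(torch.rand([4, 3, 64, 64]))
    assert feats.shape == (4, 3380)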
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 144000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3600 % 10
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_max_pool2d_with_indices_1(in_ptr0, out_ptr0,
out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 36000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 30
x3 = xindex // 30
x2 = xindex // 9000
x4 = xindex % 9000
x5 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 120 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 120 * x3), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (60 + 2 * x0 + 120 * x3), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (61 + 2 * x0 + 120 * x3), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = 0.0
tmp18 = tmp16 > tmp17
tmp19 = 0.01
tmp20 = tmp16 * tmp19
tmp21 = tl.where(tmp18, tmp16, tmp20)
tl.store(out_ptr0 + (x4 + 9088 * x2), tmp15, xmask)
tl.store(out_ptr1 + x5, tmp18, xmask)
tl.store(out_ptr2 + x5, tmp21, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 54080
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 676 % 20
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_max_pool2d_with_indices_3(in_ptr0, out_ptr0,
out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 13520
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 13
x3 = xindex // 13
x2 = xindex // 3380
x4 = xindex % 3380
x5 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 52 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 52 * x3), xmask, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (26 + 2 * x0 + 52 * x3), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (27 + 2 * x0 + 52 * x3), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = 0.0
tmp18 = tmp16 > tmp17
tmp19 = 0.01
tmp20 = tmp16 * tmp19
tmp21 = tl.where(tmp18, tmp16, tmp20)
tl.store(out_ptr0 + (x4 + 3456 * x2), tmp15, xmask)
tl.store(out_ptr1 + (x4 + 3456 * x2), tmp18, xmask)
tl.store(out_ptr2 + x5, tmp21, xmask)
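# Like triton_poi_fused_leaky_relu_max_pool2d_with_indices_1 above, this kernel
# fuses a 2x2/stride-2 max-pool with a leaky ReLU (slope 0.01): it stores the
# argmax index of each pooling window, a positivity mask, and the activated
# pooled value.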
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (10, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (10,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (20, 10, 5, 5), (250, 25, 5, 1))
assert_size_stride(primals_5, (20,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 10, 60, 60), (36000, 3600, 60, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(144000)](buf1, primals_2,
144000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 10, 30, 30), (9088, 900, 30, 1),
torch.int8)
buf3 = empty_strided_cuda((4, 10, 30, 30), (9000, 900, 30, 1),
torch.bool)
buf4 = empty_strided_cuda((4, 10, 30, 30), (9000, 900, 30, 1),
torch.float32)
triton_poi_fused_leaky_relu_max_pool2d_with_indices_1[grid(36000)](buf1
, buf2, buf3, buf4, 36000, XBLOCK=256, num_warps=4, num_stages=1)
buf5 = extern_kernels.convolution(buf4, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 20, 26, 26), (13520, 676, 26, 1))
buf6 = buf5
del buf5
triton_poi_fused_convolution_2[grid(54080)](buf6, primals_5, 54080,
XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf7 = empty_strided_cuda((4, 20, 13, 13), (3456, 169, 13, 1),
torch.int8)
buf8 = empty_strided_cuda((4, 20, 13, 13), (3456, 169, 13, 1),
torch.bool)
buf9 = empty_strided_cuda((4, 20, 13, 13), (3380, 169, 13, 1),
torch.float32)
triton_poi_fused_leaky_relu_max_pool2d_with_indices_3[grid(13520)](buf6
, buf7, buf8, buf9, 13520, XBLOCK=256, num_warps=4, num_stages=1)
return (reinterpret_tensor(buf9, (4, 3380), (3380, 1), 0), primals_1,
primals_3, primals_4, buf1, buf2, buf3, buf4, buf6, buf7, buf8)
class MnistFeatureExtractorNew(nn.Module):
def __init__(self, activation=F.leaky_relu):
super(MnistFeatureExtractorNew, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.activation = activation
def get_mtx(self):
return None
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
wiatrak2/BScThesis
|
MnistFeatureExtractor
| false
| 4,537
|
[
"MIT"
] | 0
|
e5dd012fd9052e7088d8464b409dc055dbfcf840
|
https://github.com/wiatrak2/BScThesis/tree/e5dd012fd9052e7088d8464b409dc055dbfcf840
|