| entry_point (string, 1-65 chars) | original_triton_python_code (string, 208-619k chars) | optimised_triton_code (string, 1.15k-275k chars) | repo_name (string, 7-115 chars) | module_name (string, 1-65 chars) | synthetic (bool, 1 class) | uuid (int64, 0-18.5k) | licenses (list, 1-6 entries) | stars (int64, 0-19.8k) | sha (string, 40 chars) | repo_link (string, 72-180 chars) |
|---|---|---|---|---|---|---|---|---|---|---|
StackingNNet
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_1/inductor_cache/wk/cwkgtiw4rsyedr3g7k3dha5xhwfl3k5ccryskhhubg4wicitzwjh.py
# Topologically Sorted Source Nodes: [mul, wrapped_sqrt, pow_1, mul_1, add, mul_2, tanh, add_1, mul_3], Original ATen: [aten.mul, aten.sqrt, aten.pow, aten.add, aten.tanh]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# pow_1 => pow_1
# tanh => tanh
# wrapped_sqrt => full_default
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.7978845608028654), kwargs = {dtype: torch.float64, layout: torch.strided, device: cpu, pin_memory: False})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_1, 3), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.044715), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %mul_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%full_default, %add), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%mul_2,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add_1), kwargs = {})
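# Written out, the fragment above is the tanh approximation of GELU, with
# 0.7978845608028654 = sqrt(2 / pi):
#   out = 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x ** 3)))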
triton_poi_fused_add_mul_pow_sqrt_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_pow_sqrt_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_pow_sqrt_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_pow_sqrt_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
tmp9 = tmp8 * tmp7
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, wrapped_sqrt, pow_1, mul_1, add, mul_2, tanh, add_1, mul_3], Original ATen: [aten.mul, aten.sqrt, aten.pow, aten.add, aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_pow_sqrt_tanh_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
import torch.nn as nn
import torch.utils.data.distributed
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_pow_sqrt_tanh_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
tmp9 = tmp8 * tmp7
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_pow_sqrt_tanh_0[grid(256)](buf0, buf1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0
def gelu(x):
return 0.5 * x * (1 + torch.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 *
torch.pow(x, 3))))
class StackingNNetNew(nn.Module):
def __init__(self, input_size, output_size):
super(StackingNNetNew, self).__init__()
self.fc1 = nn.Linear(input_size, output_size)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
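# A minimal sanity-check sketch, assuming a CUDA device and the 4-feature shapes
# asserted in call(); it compares the fused path against an eager fc1 followed by
# the tanh-approximation gelu() helper defined above.
if torch.cuda.is_available():
    model = StackingNNetNew(4, 4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    with torch.no_grad():
        fused = model(x)                # addmm + fused Triton GELU kernel
        reference = gelu(model.fc1(x))  # eager Linear + tanh-approximation GELU
    print(torch.allclose(fused, reference, atol=1e-6))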
|
ECNU-ICA/ECNU-SenseMaker
|
StackingNNet
| false
| 8,005
|
[
"MIT"
] | 16
|
24f829c3dfefccea5fecbbe75904858ec1fefffb
|
https://github.com/ECNU-ICA/ECNU-SenseMaker/tree/24f829c3dfefccea5fecbbe75904858ec1fefffb
|
Sharpen_Block
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/hi/chieqt2p7y3xojubgmoetn3r5tmgtaq7aft5irb7n2uvwm6zuiqy.py
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.reflection_pad2d]
# Source node to ATen node mapping:
# pad => _unsafe_index, _unsafe_index_1
# Graph fragment:
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_3]), kwargs = {})
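# In plain terms: the 4x4 input is reflection-padded by one element on each side to
# 6x6, and the abs() terms in the load index below fold every padded coordinate back
# onto its mirrored source element. For example, output position (x1, x0) = (0, 0)
# resolves to 15 - 4*2 - 1*2 = 5, i.e. input element (1, 1), the reflected corner.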
triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6) % 6
x2 = (xindex // 36)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(arg1_1, (1, 1, 3, 3), (9, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 6, 6), (36, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.reflection_pad2d]
stream0 = get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0.run(arg0_1, buf0, 144, grid=grid(144), stream=stream0)
del arg0_1
# Topologically Sorted Source Nodes: [pad, conv2d], Original ATen: [aten.reflection_pad2d, aten.convolution]
buf1 = extern_kernels.convolution(buf0, arg1_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
del arg1_1
del buf0
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((1, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(arg1_1, (1, 1, 3, 3), (9, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 6, 6), (36, 36, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(144)](arg0_1, buf0, 144,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
buf1 = extern_kernels.convolution(buf0, arg1_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
del arg1_1
del buf0
return buf1,
class Sharpen_BlockNew(nn.Module):
def __init__(self):
super(Sharpen_BlockNew, self).__init__()
self.pad = nn.ReflectionPad2d((1, 1, 1, 1))
self.conv = nn.Conv2d(1, 1, 3, 1, 0, bias=False)
self.conv.weight = nn.Parameter(torch.from_numpy(np.array([[[[0, -0.4, 0], [0, 2.6, 0], [0, -0.4, 0]]]])).float())
self.conv.weight.requires_grad = False
def forward(self, input_0):
arg1_1 = self.conv.weight
arg0_1 = input_0
output = call([arg0_1, arg1_1])
return output[0]
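# A minimal sanity-check sketch, assuming a CUDA device and the (4, 1, 4, 4) input
# shape asserted in call(); it compares the Triton reflection-pad plus extern
# convolution path against the eager reflect-pad + conv2d equivalent.
import torch.nn.functional as F
if torch.cuda.is_available():
    model = Sharpen_BlockNew().cuda()
    x = torch.rand(4, 1, 4, 4, device='cuda')
    with torch.no_grad():
        fused = model(x)
        padded = F.pad(x, (1, 1, 1, 1), mode='reflect')
        reference = F.conv2d(padded, model.conv.weight)
    print(torch.allclose(fused, reference, atol=1e-6))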
|
MingSun-Tse/pytorch-vdsr
|
Sharpen_Block
| false
| 5,610
|
[
"MIT"
] | 1
|
597bacb4ec7385c8cc6cdf91e26e64ef2e6808b7
|
https://github.com/MingSun-Tse/pytorch-vdsr/tree/597bacb4ec7385c8cc6cdf91e26e64ef2e6808b7
|
TorchSub
|
import torch
class TorchSub(torch.nn.Module):
def __init__(self):
super(TorchSub, self).__init__()
def forward(self, x, y):
return torch.sub(x, y)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 - tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_sub_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class TorchSubNew(torch.nn.Module):
def __init__(self):
super(TorchSubNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
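# A minimal usage sketch, assuming a CUDA device and the (4, 4, 4, 4) shapes asserted
# in call(); it runs the Triton-fused subtraction next to the eager torch.sub that the
# original TorchSub module wraps and reports whether the two agree.
if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = torch.rand(4, 4, 4, 4, device='cuda')
    fused = TorchSubNew()(x, y)
    reference = torch.sub(x, y)
    print(torch.allclose(fused, reference))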
|
NVIDIA-AI-IOT-private/torch2trt
|
TorchSub
| false
| 10,550
|
[
"MIT"
] | 0
|
953d60039e0c81e90eea467c3df2e6e3f7040242
|
https://github.com/NVIDIA-AI-IOT-private/torch2trt/tree/953d60039e0c81e90eea467c3df2e6e3f7040242
|
L1_Charbonnier_loss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/em/cemopf37cajc2ye56a3n75dw2wrilpxcfiwvulkl63jui2orhf5e.py
# Topologically Sorted Source Nodes: [neg, diff, mul, add_1, error, loss], Original ATen: [aten.neg, aten.add, aten.mul, aten.sqrt, aten.mean]
# Source node to ATen node mapping:
# add_1 => add_1
# diff => add
# error => sqrt
# loss => mean
# mul => mul
# neg => neg
# Graph fragment:
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, %neg), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %add), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1e-06), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sqrt,), kwargs = {})
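# Composed, the fragment above is the Charbonnier (smooth L1) penalty:
#   loss = mean(sqrt((arg1_1 - arg0_1) ** 2 + 1e-06))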
triton_per_fused_add_mean_mul_neg_sqrt_0 = async_compile.triton('triton_per_fused_add_mean_mul_neg_sqrt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_mul_neg_sqrt_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_mul_neg_sqrt_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = -tmp1
tmp3 = tmp0 + tmp2
tmp4 = tmp3 * tmp3
tmp5 = 1e-06
tmp6 = tmp4 + tmp5
tmp7 = libdevice.sqrt(tmp6)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = 256.0
tmp12 = tmp10 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp12, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [neg, diff, mul, add_1, error, loss], Original ATen: [aten.neg, aten.add, aten.mul, aten.sqrt, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_mean_mul_neg_sqrt_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
from torch.nn.modules.loss import _Loss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_mul_neg_sqrt_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = -tmp1
tmp3 = tmp0 + tmp2
tmp4 = tmp3 * tmp3
tmp5 = 1e-06
tmp6 = tmp4 + tmp5
tmp7 = libdevice.sqrt(tmp6)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = 256.0
tmp12 = tmp10 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_mean_mul_neg_sqrt_0[grid(1)](buf1, arg1_1,
arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class L1_Charbonnier_lossNew(_Loss):
"""
L1 Charbonnier loss
"""
def __init__(self, para):
super(L1_Charbonnier_lossNew, self).__init__()
self.eps = 0.001
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
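# A minimal sanity-check sketch, assuming a CUDA device and the (4, 4, 4, 4) shapes
# asserted in call(). The `para` constructor argument is unused by this class, so
# None is passed; the 1e-06 constant folded into the kernel equals eps ** 2 for
# eps = 0.001.
if torch.cuda.is_available():
    criterion = L1_Charbonnier_lossNew(None)
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = torch.rand(4, 4, 4, 4, device='cuda')
    fused = criterion(x, y)
    reference = torch.mean(torch.sqrt((x - y) ** 2 + 1e-06))
    print(torch.allclose(fused, reference, atol=1e-6))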
|
YDDDDG/3D2Unet
|
L1_Charbonnier_loss
| false
| 6,002
|
[
"MIT"
] | 1
|
daca056958fb2ae319dc18a350e04b3cefe0d99f
|
https://github.com/YDDDDG/3D2Unet/tree/daca056958fb2ae319dc18a350e04b3cefe0d99f
|
CrossEntropy
|
import torch
from torch import nn
from torch.nn import functional as F
class CrossEntropy(nn.Module):
def __init__(self, ignore_label=-1, weight=None):
super(CrossEntropy, self).__init__()
self.ignore_label = ignore_label
self.criterion = nn.CrossEntropyLoss(weight=weight, ignore_index=
ignore_label)
def forward(self, score, target):
ph, pw = score.size(2), score.size(3)
h, w = target.size(1), target.size(2)
if ph != h or pw != w:
score = F.upsample(input=score, size=(h, w), mode='bilinear')
loss = self.criterion(score, target)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr1 + r3, None)
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp15 = tmp13 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = -tmp18
tmp20 = 0.015625
tmp21 = tmp19 * tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused__log_softmax_div_mul_neg_sum_1[grid(1)](buf2, buf0,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg1_1
del buf0
return buf2,
class CrossEntropyNew(nn.Module):
def __init__(self, ignore_label=-1, weight=None):
super(CrossEntropyNew, self).__init__()
self.ignore_label = ignore_label
self.criterion = nn.CrossEntropyLoss(weight=weight, ignore_index=
ignore_label)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
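# A minimal sanity-check sketch, assuming a CUDA device and matching (4, 4, 4, 4)
# score/target shapes, so the upsample branch of the original module is a no-op.
# The two kernels then amount to soft-target cross entropy; the 0.015625 constant
# in the reduction kernel is 1 / 64, i.e. an average over the 4 * 4 * 4 positions.
from torch.nn import functional as F
if torch.cuda.is_available():
    score = torch.rand(4, 4, 4, 4, device='cuda')
    target = torch.rand(4, 4, 4, 4, device='cuda')
    fused = CrossEntropyNew()(score, target)
    reference = -(target * F.log_softmax(score, dim=1)).sum() / 64
    print(torch.allclose(fused, reference, atol=1e-5))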
|
Gummary/Pytorch-Project-Template
|
CrossEntropy
| false
| 497
|
[
"MIT"
] | 0
|
56bc5e253627d40fb8771eccdb2bb663c833beb3
|
https://github.com/Gummary/Pytorch-Project-Template/tree/56bc5e253627d40fb8771eccdb2bb663c833beb3
|
MultiHeadAttn
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/fq/cfqwyletxlxsztzvms4ugcujphw6sshjg2bm6qtmkz3kg7omhsdb.py
# Topologically Sorted Source Nodes: [attn_score], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# attn_score => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_5,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (32*y1) + (128*x2)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/q2/cq2bhrixv5xavijmo4bsnrvtws5kwvqhdummdlzij7fvpl7fcfb5.py
# Topologically Sorted Source Nodes: [attn_prob], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_prob => amax, clone_1, exp, sub
# Graph fragment:
# %clone_1 : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%view_15,), kwargs = {memory_format: torch.contiguous_format})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%clone_1, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
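# Together with triton_poi_fused__softmax_2 further below, this kernel implements a
# numerically stable softmax: it forms exp(s*x - max(s*x)) here and the next kernel
# divides by the row sum. The 0.5 scale folded into the kernel is consistent with the
# usual attention scaling 1 / sqrt(d_head) for a head size of 4.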
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tl_math.exp(tmp14)
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/kl/ckl6u3t54hef2b5wjd5dpgg7u4q64ygtqnuha2usouwtpaivlz52.py
# Topologically Sorted Source Nodes: [attn_prob], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_prob => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (y3 + (16*x2)), xmask & ymask)
tmp1 = tl.load(in_ptr0 + ((4*y1) + (16*x2)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*y1) + (16*x2)), xmask & ymask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*y1) + (16*x2)), xmask & ymask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*y1) + (16*x2)), xmask & ymask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2 + (16*y3)), tmp8, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/7t/c7tefgs6fnwuoixspsoxpumnyhdslyt74yuzsspadja5iim4s57k.py
# Topologically Sorted Source Nodes: [attn_vec], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# attn_vec => clone_3
# Graph fragment:
# %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_14,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (16 + x0 + (4*x2) + (32*x3) + (128*x1)), xmask)
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/r2/cr2qmbufonkcpvzj5nto7mm2yb255fxx7ca2wtbl5deiamneh6dk.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view_21,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 16
x2 = (xindex // 64)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1)), xmask)
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/6m/c6mhj5zwirfhy5e4o45uaeov72uwfby4udubpm2fcz42iqvs2g57.py
# Topologically Sorted Source Nodes: [add, output], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# add => add
# output => var_mean
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_24), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_5 = async_compile.triton('triton_poi_fused_add_native_layer_norm_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/iz/cizh7p23zwsiqbrt6dvrlvjzpyujwvyyaolptfk5xtby6foymiaz.py
# Topologically Sorted Source Nodes: [add, output], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# add => add
# output => add_1, add_2, mul_1, mul_2, rsqrt, sub_1
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_24), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_3), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %primals_5), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_6), kwargs = {})
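# Written out, the two fragments above compute a residual add followed by LayerNorm:
#   out = (x + attn_out - mean) * rsqrt(var + 1e-05) * primals_5 + primals_6
# where mean and var are taken over the last (size-4) dimension.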
triton_poi_fused_add_native_layer_norm_6 = async_compile.triton('triton_poi_fused_add_native_layer_norm_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 4), (4, 1))
assert_size_stride(primals_3, (32, 4), (4, 1))
assert_size_stride(primals_4, (4, 16), (16, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [head_q], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 32), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_score], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf1, buf2, 64, 4, grid=grid(64, 4), stream=stream0)
buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_score], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (4, 64, 1), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 4), (4, 1, 64, 16), torch.float32)
# Topologically Sorted Source Nodes: [attn_prob], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf3, buf4, 256, grid=grid(256), stream=stream0)
buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [attn_prob], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf4, buf5, 16, 16, grid=grid(16, 16), stream=stream0)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [attn_vec], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf1, buf6, 256, grid=grid(256), stream=stream0)
del buf1
buf7 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_vec], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (1, 64, 16), 0), reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf7, buf8, 256, grid=grid(256), stream=stream0)
del buf7
buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_out], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf8, (16, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 4), (1, 16), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [add, output], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_5.run(primals_1, buf9, buf10, buf11, 16, grid=grid(16), stream=stream0)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, output], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_6.run(primals_1, buf9, buf10, buf11, primals_5, primals_6, buf12, 64, grid=grid(64), stream=stream0)
del buf10
del buf11
del primals_6
return (buf12, primals_1, primals_5, buf5, reinterpret_tensor(buf8, (16, 16), (16, 1), 0), buf9, primals_4, reinterpret_tensor(buf6, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf0, (16, 4, 4), (4, 1, 64), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((32, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 32 * y1 + 128 * x2), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tl_math.exp(tmp14)
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (y3 + 16 * x2), xmask & ymask)
tmp1 = tl.load(in_ptr0 + (4 * y1 + 16 * x2), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * y1 + 16 * x2), xmask & ymask,
eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * y1 + 16 * x2), xmask & ymask,
eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * y1 + 16 * x2), xmask & ymask,
eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2 + 16 * y3), tmp8, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (16 + x0 + 4 * x2 + 32 * x3 + 128 * x1), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 16
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
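# Reading of the buffer flow in call() below (added annotation, not part of the
# original dump): buf0 and buf1 hold the Q and fused K/V projections, buf3 the
# raw attention scores (the 0.5 scale, i.e. 1/sqrt(d_head), is applied inside
# the softmax kernels), buf5 the normalized attention probabilities, buf7/buf8
# the attention-weighted values, buf9 the output projection, and buf12 the
# residual add followed by layer norm.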
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 4), (4, 1))
assert_size_stride(primals_3, (32, 4), (4, 1))
assert_size_stride(primals_4, (4, 16), (16, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 32), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch
.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64, 4)](buf1, buf2, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (4, 64, 1),
0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 4), (4, 1, 64, 16), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf3, buf4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
triton_poi_fused__softmax_2[grid(16, 16)](buf4, buf5, 16, 16,
XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0)
del buf4
triton_poi_fused_clone_3[grid(256)](buf1, buf6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf1
buf7 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (1, 64, 16),
0), reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(256)](buf7, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf7
buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf8, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_4, (16, 4), (1, 16), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(16)](primals_1, buf9,
buf10, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_6[grid(64)](primals_1, buf9,
buf10, buf11, primals_5, primals_6, buf12, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf10
del buf11
del primals_6
return buf12, primals_1, primals_5, buf5, reinterpret_tensor(buf8, (16,
16), (16, 1), 0), buf9, primals_4, reinterpret_tensor(buf6, (16, 4,
4), (16, 1, 4), 0), reinterpret_tensor(buf0, (16, 4, 4), (4, 1, 64), 0
), reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0)
class MultiHeadAttnNew(nn.Module):
def __init__(self, n_head, d_model, d_head, dropout, dropatt=0,
pre_lnorm=False):
super(MultiHeadAttnNew, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.dropout = dropout
self.q_net = nn.Linear(d_model, n_head * d_head, bias=False)
self.kv_net = nn.Linear(d_model, 2 * n_head * d_head, bias=False)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = nn.LayerNorm(d_model)
self.scale = 1 / d_head ** 0.5
self.pre_lnorm = pre_lnorm
def forward(self, input_0):
primals_2 = self.q_net.weight
primals_3 = self.kv_net.weight
primals_4 = self.o_net.weight
primals_5 = self.layer_norm.weight
primals_6 = self.layer_norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
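def _example_usage():
    # Hedged usage sketch, not part of the original repo: the shape guards in
    # call() are consistent with d_model=4 and n_head * d_head = 16, e.g.
    # n_head=4, d_head=4; dropout=0.0 and the CUDA device are assumptions.
    model = MultiHeadAttnNew(n_head=4, d_model=4, d_head=4, dropout=0.0).cuda()
    x = torch.rand(4, 4, 4, device='cuda')
    return model(x)  # tensor of shape (4, 4, 4)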
|
UoMfzp/transformer-xl-Chinese-Pytorch
|
MultiHeadAttn
| false
| 11,959
|
[
"Apache-2.0"
] | 0
|
435641ed138e81f949c5b557b5a13c0a09fb6018
|
https://github.com/UoMfzp/transformer-xl-Chinese-Pytorch/tree/435641ed138e81f949c5b557b5a13c0a09fb6018
|
ModelWithDuplicates
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/x7/cx7zib5vfcs4tjugjecjyojpxio3h7wkcy5bqp7pc5phvne4zdgj.py
# Topologically Sorted Source Nodes: [x, x_1, x_2], Original ATen: [aten.convolution, aten.relu, aten.tanh, aten.threshold_backward]
# Source node to ATen node mapping:
# x => convolution
# x_1 => relu
# x_2 => tanh
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%relu,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
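# In plain terms, the fused kernel below computes tanh(relu(conv_out + bias)) and
# also stores the boolean mask (relu output <= 0) reused by the backward threshold pass.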
triton_poi_fused_convolution_relu_tanh_threshold_backward_0 = async_compile.triton('triton_poi_fused_convolution_relu_tanh_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_tanh_threshold_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_tanh_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 144000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3600) % 10
x0 = xindex % 3600
x4 = (xindex // 3600)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = libdevice.tanh(tmp4)
tmp6 = 0.0
tmp7 = tmp4 <= tmp6
tl.store(out_ptr0 + (x3), tmp5, xmask)
tl.store(out_ptr1 + (x0 + (3712*x4)), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/sl/cslyor46ejkl5lvclqvfd2qnnvpo2y3hutdhtpmver5xbwv2l3ek.py
# Topologically Sorted Source Nodes: [x_3, x_4, x_5], Original ATen: [aten.convolution, aten.relu, aten.tanh, aten.threshold_backward]
# Source node to ATen node mapping:
# x_3 => convolution_1
# x_4 => relu_1
# x_5 => tanh_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%tanh, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
# %tanh_1 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%relu_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_convolution_relu_tanh_threshold_backward_1 = async_compile.triton('triton_poi_fused_convolution_relu_tanh_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_tanh_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_tanh_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 269120
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3364) % 20
x0 = xindex % 3364
x4 = (xindex // 3364)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = libdevice.tanh(tmp4)
tmp6 = 0.0
tmp7 = tmp4 <= tmp6
tl.store(out_ptr0 + (x3), tmp5, xmask)
tl.store(out_ptr1 + (x0 + (3456*x4)), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (10, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (10, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (20, 10, 3, 3), (90, 9, 3, 1))
assert_size_stride(primals_5, (20, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 10, 60, 60), (36000, 3600, 60, 1))
buf1 = empty_strided_cuda((4, 10, 60, 60), (36000, 3600, 60, 1), torch.float32)
buf5 = empty_strided_cuda((4, 10, 60, 60), (37120, 3712, 60, 1), torch.bool)
# Topologically Sorted Source Nodes: [x, x_1, x_2], Original ATen: [aten.convolution, aten.relu, aten.tanh, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_tanh_threshold_backward_0.run(buf0, primals_2, buf1, buf5, 144000, grid=grid(144000), stream=stream0)
del buf0
del primals_2
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 20, 58, 58), (67280, 3364, 58, 1))
buf3 = empty_strided_cuda((4, 20, 58, 58), (67280, 3364, 58, 1), torch.float32)
buf4 = empty_strided_cuda((4, 20, 58, 58), (69120, 3456, 58, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_3, x_4, x_5], Original ATen: [aten.convolution, aten.relu, aten.tanh, aten.threshold_backward]
triton_poi_fused_convolution_relu_tanh_threshold_backward_1.run(buf2, primals_5, buf3, buf4, 269120, grid=grid(269120), stream=stream0)
del buf2
del primals_5
return (buf3, primals_1, primals_3, primals_4, buf1, buf3, buf4, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((10, 3, 5, 5), (75, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((20, 10, 3, 3), (90, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from collections import OrderedDict
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from torch.optim.lr_scheduler import *
import torch.optim.lr_scheduler
import torch.onnx
import torch.testing
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_tanh_threshold_backward_0(in_ptr0,
in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 144000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3600 % 10
x0 = xindex % 3600
x4 = xindex // 3600
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = libdevice.tanh(tmp4)
tmp6 = 0.0
tmp7 = tmp4 <= tmp6
tl.store(out_ptr0 + x3, tmp5, xmask)
tl.store(out_ptr1 + (x0 + 3712 * x4), tmp7, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_tanh_threshold_backward_1(in_ptr0,
in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 269120
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3364 % 20
x0 = xindex % 3364
x4 = xindex // 3364
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = libdevice.tanh(tmp4)
tmp6 = 0.0
tmp7 = tmp4 <= tmp6
tl.store(out_ptr0 + x3, tmp5, xmask)
tl.store(out_ptr1 + (x0 + 3456 * x4), tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (10, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (10,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (20, 10, 3, 3), (90, 9, 3, 1))
assert_size_stride(primals_5, (20,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 10, 60, 60), (36000, 3600, 60, 1))
buf1 = empty_strided_cuda((4, 10, 60, 60), (36000, 3600, 60, 1),
torch.float32)
buf5 = empty_strided_cuda((4, 10, 60, 60), (37120, 3712, 60, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_convolution_relu_tanh_threshold_backward_0[grid(
144000)](buf0, primals_2, buf1, buf5, 144000, XBLOCK=512,
num_warps=8, num_stages=1)
del buf0
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 20, 58, 58), (67280, 3364, 58, 1))
buf3 = empty_strided_cuda((4, 20, 58, 58), (67280, 3364, 58, 1),
torch.float32)
buf4 = empty_strided_cuda((4, 20, 58, 58), (69120, 3456, 58, 1),
torch.bool)
triton_poi_fused_convolution_relu_tanh_threshold_backward_1[grid(
269120)](buf2, primals_5, buf3, buf4, 269120, XBLOCK=512,
num_warps=8, num_stages=1)
del buf2
del primals_5
return buf3, primals_1, primals_3, primals_4, buf1, buf3, buf4, buf5
class ModelWithDuplicatesNew(nn.Module):
def __init__(self):
super(ModelWithDuplicatesNew, self).__init__()
self.conv1 = nn.Conv2d(3, 10, 5)
self.post_conv1 = nn.ModuleList([nn.ReLU(), nn.Tanh()])
self.conv2 = nn.Conv2d(10, 20, 3)
self.post_conv2 = self.post_conv1
self.expected_mlist_to_dmlist = OrderedDict([('post_conv1', [
'post_conv1']), ('post_conv2', ['post_conv2'])])
self.expected_list_contents_name_changes = OrderedDict([(
'post_conv1.0', 'post_conv1_0'), ('post_conv1.1',
'post_conv1_1'), ('post_conv2.0', 'post_conv2_0'), (
'post_conv2.1', 'post_conv2_1')])
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
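def _example_usage():
    # Hedged usage sketch, not part of the original repo: call() asserts a
    # (4, 3, 64, 64) float32 input; the CUDA device is assumed.
    model = ModelWithDuplicatesNew().cuda()
    x = torch.rand(4, 3, 64, 64, device='cuda')
    return model(x)  # feature map of shape (4, 20, 58, 58)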
|
Emily0219/distiller
|
ModelWithDuplicates
| false
| 5,138
|
[
"Apache-2.0"
] | 1
|
445ed35b671fb54586acc280b53d951f18bf97ae
|
https://github.com/Emily0219/distiller/tree/445ed35b671fb54586acc280b53d951f18bf97ae
|
BPRLoss
|
import torch
import torch.nn as nn
class BPRLoss(nn.Module):
""" BPRLoss, based on Bayesian Personalized Ranking
Args:
- gamma(float): Small value to avoid division by zero
Shape:
- Pos_score: (N)
- Neg_score: (N), same shape as the Pos_score
- Output: scalar.
Examples::
>>> loss = BPRLoss()
>>> pos_score = torch.randn(3, requires_grad=True)
>>> neg_score = torch.randn(3, requires_grad=True)
>>> output = loss(pos_score, neg_score)
>>> output.backward()
"""
def __init__(self, gamma=1e-10):
super(BPRLoss, self).__init__()
self.gamma = gamma
def forward(self, pos_score, neg_score):
loss = -torch.log(self.gamma + torch.sigmoid(pos_score - neg_score)
).mean()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_log_mean_neg_sigmoid_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl.sigmoid(tmp2)
tmp4 = 1e-10
tmp5 = tmp3 + tmp4
tmp6 = tl_math.log(tmp5)
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = 256.0
tmp11 = tmp9 / tmp10
tmp12 = -tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None)
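# Added annotation: this single-block reduction computes the whole BPR loss in
# one pass, i.e. loss = -mean(log(gamma + sigmoid(pos_score - neg_score))) over
# all 256 elements, with gamma hard-coded to 1e-10.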
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_log_mean_neg_sigmoid_sub_0[grid(1)](buf1,
arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class BPRLossNew(nn.Module):
""" BPRLoss, based on Bayesian Personalized Ranking
Args:
- gamma(float): Small value to avoid division by zero
Shape:
- Pos_score: (N)
- Neg_score: (N), same shape as the Pos_score
- Output: scalar.
Examples::
>>> loss = BPRLoss()
>>> pos_score = torch.randn(3, requires_grad=True)
>>> neg_score = torch.randn(3, requires_grad=True)
>>> output = loss(pos_score, neg_score)
>>> output.backward()
"""
def __init__(self, gamma=1e-10):
super(BPRLossNew, self).__init__()
self.gamma = gamma
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
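def _example_usage():
    # Hedged usage sketch, not part of the original repo: the fused kernel
    # assumes two (4, 4, 4, 4) inputs (256 elements); the CUDA device is assumed.
    pos = torch.rand(4, 4, 4, 4, device='cuda')
    neg = torch.rand(4, 4, 4, 4, device='cuda')
    return BPRLossNew()(pos, neg)  # scalar loss tensor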
|
Ahren09/RecBole
|
BPRLoss
| false
| 1,912
|
[
"MIT"
] | 0
|
b3921818dfbc1b81f9eda8d5e9f05bc9d9114089
|
https://github.com/Ahren09/RecBole/tree/b3921818dfbc1b81f9eda8d5e9f05bc9d9114089
|
AttwNetHead
|
import torch
import torch.nn as nn
import torch.distributed
import torch.optim.lr_scheduler
import torch.utils.data
class AttwNetHead(nn.Module):
def __init__(self, idim, hdim, odim):
super().__init__()
self.mlp_attn = nn.Linear(idim, 1, bias=False)
self.mlp_out = nn.Linear(idim, odim, bias=False)
def masked_softmax(self, vector: 'torch.Tensor', mask: 'torch.Tensor',
dim: 'int'=-1, memory_efficient: 'bool'=False, mask_fill_value:
'float'=-1e+32):
if mask is None:
result = torch.nn.functional.softmax(vector, dim=dim)
else:
mask = mask.float()
while mask.dim() < vector.dim():
mask = mask.unsqueeze(1)
if not memory_efficient:
result = torch.nn.functional.softmax(vector * mask, dim=dim)
result = result * mask
result = result / (result.sum(dim=dim, keepdim=True) + 1e-13)
else:
masked_vector = vector.masked_fill((1 - mask).bool(),
mask_fill_value)
result = torch.nn.functional.softmax(masked_vector, dim=dim)
return result + 1e-13
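    # Added note: with memory_efficient=True, masked positions are filled with a
    # large negative value before the softmax; otherwise softmax(vector * mask) is
    # re-masked and renormalized. Both paths add 1e-13 to the returned result.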
def mask_softmax(self, feat, mask, dim=-1):
return self.masked_softmax(feat, mask, memory_efficient=True, dim=dim)
def get_mask_from_sequence_lengths(self, sequence_lengths:
'torch.Tensor', max_length: 'int'):
ones = sequence_lengths.new_ones(sequence_lengths.size(0), max_length)
range_tensor = ones.cumsum(dim=1)
return (sequence_lengths.unsqueeze(1) >= range_tensor).long()
def forward(self, mfeats, mask):
logits = self.mlp_attn(mfeats)
attw = self.mask_softmax(logits, mask.unsqueeze(-1).repeat(1, 1,
logits.shape[-1]), dim=1)
attn_feats = mfeats * attw
res = self.mlp_out(attn_feats)
return res, attw.squeeze()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'idim': 4, 'hdim': 4, 'odim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.distributed
import torch.optim.lr_scheduler
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax__to_copy_masked_fill_rsub_0(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
x1 = xindex % 4
x2 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + (x1 + 16 * x2), xmask)
tmp7 = tl.load(in_ptr1 + (4 + x1 + 16 * x2), xmask)
tmp10 = tl.load(in_ptr1 + (8 + x1 + 16 * x2), xmask)
tmp13 = tl.load(in_ptr1 + (12 + x1 + 16 * x2), xmask)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp3 = tmp2 != 0
tmp5 = -1.0000000331813535e+32
tmp6 = tl.where(tmp3, tmp5, tmp4)
tmp8 = tl.where(tmp3, tmp5, tmp7)
tmp9 = triton_helpers.maximum(tmp6, tmp8)
tmp11 = tl.where(tmp3, tmp5, tmp10)
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tl.where(tmp3, tmp5, tmp13)
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp16 = tmp6 - tmp15
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp8 - tmp15
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tmp11 - tmp15
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tmp24 = tmp14 - tmp15
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tl.store(out_ptr0 + x0, tmp3, xmask)
tl.store(out_ptr1 + x0, tmp15, xmask)
tl.store(out_ptr2 + x0, tmp26, xmask)
@triton.jit
def triton_poi_fused__softmax_add_masked_fill_mul_1(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = xindex // 4 % 4
x3 = xindex // 64
x5 = xindex // 4
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + (x1 + 4 * x3), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp2 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr4 + (x1 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp3 = -1.0000000331813535e+32
tmp4 = tl.where(tmp1, tmp3, tmp2)
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tmp10 = 1e-13
tmp11 = tmp9 + tmp10
tmp12 = tmp0 * tmp11
tl.store(out_ptr0 + x4, tmp12, xmask)
@triton.jit
def triton_poi_fused__softmax_add_masked_fill_2(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp1 = tl.load(in_ptr1 + x3, xmask)
tmp4 = tl.load(in_ptr2 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp7 = tl.load(in_ptr3 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = -1.0000000331813535e+32
tmp3 = tl.where(tmp0, tmp2, tmp1)
tmp5 = tmp3 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp8 = tmp6 / tmp7
tmp9 = 1e-13
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + x3, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 1, 4, 1), (4, 4, 1, 1), torch.bool)
buf2 = empty_strided_cuda((4, 1, 4, 1), (4, 16, 1, 16), torch.float32)
buf3 = empty_strided_cuda((4, 1, 4, 1), (4, 16, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax__to_copy_masked_fill_rsub_0[grid(16)](
primals_3, buf0, buf1, buf2, buf3, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del primals_3
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_add_masked_fill_mul_1[grid(256)](primals_2,
buf1, buf0, buf2, buf3, buf4, 256, XBLOCK=256, num_warps=4,
num_stages=1)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused__softmax_add_masked_fill_2[grid(64)](buf1, buf0,
buf2, buf3, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf2
del buf3
return reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0
), primals_2, buf0, buf1, reinterpret_tensor(buf4, (64, 4), (4, 1), 0
), primals_4
class AttwNetHeadNew(nn.Module):
def __init__(self, idim, hdim, odim):
super().__init__()
self.mlp_attn = nn.Linear(idim, 1, bias=False)
self.mlp_out = nn.Linear(idim, odim, bias=False)
def masked_softmax(self, vector: 'torch.Tensor', mask: 'torch.Tensor',
dim: 'int'=-1, memory_efficient: 'bool'=False, mask_fill_value:
'float'=-1e+32):
if mask is None:
result = torch.nn.functional.softmax(vector, dim=dim)
else:
mask = mask.float()
while mask.dim() < vector.dim():
mask = mask.unsqueeze(1)
if not memory_efficient:
result = torch.nn.functional.softmax(vector * mask, dim=dim)
result = result * mask
result = result / (result.sum(dim=dim, keepdim=True) + 1e-13)
else:
masked_vector = vector.masked_fill((1 - mask).bool(),
mask_fill_value)
result = torch.nn.functional.softmax(masked_vector, dim=dim)
return result + 1e-13
def mask_softmax(self, feat, mask, dim=-1):
return self.masked_softmax(feat, mask, memory_efficient=True, dim=dim)
def get_mask_from_sequence_lengths(self, sequence_lengths:
'torch.Tensor', max_length: 'int'):
ones = sequence_lengths.new_ones(sequence_lengths.size(0), max_length)
range_tensor = ones.cumsum(dim=1)
return (sequence_lengths.unsqueeze(1) >= range_tensor).long()
def forward(self, input_0, input_1):
primals_1 = self.mlp_attn.weight
primals_3 = self.mlp_out.weight
primals_2 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0], output[1]
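def _example_usage():
    # Hedged usage sketch, not part of the original repo: call() expects mfeats of
    # shape (4, 4, 4, 4) and a (4, 4) mask, matching the source get_inputs();
    # the CUDA device is assumed.
    head = AttwNetHeadNew(idim=4, hdim=4, odim=4).cuda()
    mfeats = torch.rand(4, 4, 4, 4, device='cuda')
    mask = torch.ones(4, 4, device='cuda')
    res, attw = head(mfeats, mask)
    return res.shape, attw.shape  # (4, 4, 4, 4), (4, 4, 4)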
|
CFM-MSG/SDN
|
AttwNetHead
| false
| 189
|
[
"MIT"
] | 0
|
f309602dc2bb73117355003f3744f8e5450dbccc
|
https://github.com/CFM-MSG/SDN/tree/f309602dc2bb73117355003f3744f8e5450dbccc
|
DQN_RAM
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class DQN_RAM(nn.Module):
def __init__(self, in_features=4, num_actions=18):
"""
Initialize a deep Q-learning network for testing algorithm
in_features: number of features of input.
num_actions: number of action-value to output, one-to-one correspondence to action in game.
"""
super(DQN_RAM, self).__init__()
self.fc1 = nn.Linear(in_features, 256)
self.fc2 = nn.Linear(256, 128)
self.fc3 = nn.Linear(128, 64)
self.fc4 = nn.Linear(64, num_actions)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
return self.fc4(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (128, 256), (256, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (64, 128), (128, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (18, 64), (64, 1))
assert_size_stride(primals_9, (18,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf0
buf9 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1,
primals_2, buf9, 16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_4, (256, 128), (1, 256), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf2
buf8 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(8192)](buf3,
primals_5, buf8, 8192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 128), (128, 1), 0),
reinterpret_tensor(primals_6, (128, 64), (1, 128), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf4
buf7 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool
)
triton_poi_fused_relu_threshold_backward_2[grid(4096)](buf5,
primals_7, buf7, 4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((64, 18), (18, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 64),
(64, 1), 0), reinterpret_tensor(primals_8, (64, 18), (1, 64), 0
), alpha=1, beta=1, out=buf6)
del primals_9
return reinterpret_tensor(buf6, (4, 4, 4, 18), (288, 72, 18, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 256), (256, 1), 0
), reinterpret_tensor(buf3, (64, 128), (128, 1), 0
), reinterpret_tensor(buf5, (64, 64), (64, 1), 0
), primals_8, buf7, primals_6, buf8, primals_4, buf9
class DQN_RAMNew(nn.Module):
def __init__(self, in_features=4, num_actions=18):
"""
        Initialize a deep Q-learning network for testing the algorithm.
        in_features: number of features of the input.
        num_actions: number of action values to output, in one-to-one correspondence with the actions in the game.
"""
super(DQN_RAMNew, self).__init__()
self.fc1 = nn.Linear(in_features, 256)
self.fc2 = nn.Linear(256, 128)
self.fc3 = nn.Linear(128, 64)
self.fc4 = nn.Linear(64, num_actions)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_8 = self.fc4.weight
primals_9 = self.fc4.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
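def _example_usage():
    # Hedged usage sketch, not part of the original repo: call() asserts a
    # (4, 4, 4, 4) input and the default in_features=4, num_actions=18;
    # the CUDA device is assumed.
    model = DQN_RAMNew().cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    return model(x)  # Q-values of shape (4, 4, 4, 18)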
|
transedward/pytoch-dqn
|
DQN_RAM
| false
| 16,621
|
[
"MIT"
] | 358
|
1ffda6f3724b3bb37c3195b09b651b1682d4d4fd
|
https://github.com/transedward/pytoch-dqn/tree/1ffda6f3724b3bb37c3195b09b651b1682d4d4fd
|
NavigatorBranch
|
import torch
import torch.nn as nn
def conv1x1(in_channels, out_channels, stride=1, groups=1, bias=False):
"""
Convolution 1x1 layer.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
"""
return nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
kernel_size=1, stride=stride, groups=groups, bias=bias)
def conv3x3(in_channels, out_channels, stride=1, padding=1, dilation=1,
groups=1, bias=False):
"""
Convolution 3x3 layer.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for convolution layer.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
"""
return nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
kernel_size=3, stride=stride, padding=padding, dilation=dilation,
groups=groups, bias=bias)
class Flatten(nn.Module):
"""
Simple flatten module.
"""
def forward(self, x):
return x.view(x.size(0), -1)
class NavigatorBranch(nn.Module):
"""
Navigator branch block for Navigator unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
"""
def __init__(self, in_channels, out_channels, stride):
super(NavigatorBranch, self).__init__()
mid_channels = 128
self.down_conv = conv3x3(in_channels=in_channels, out_channels=
mid_channels, stride=stride, bias=True)
self.activ = nn.ReLU(inplace=False)
self.tidy_conv = conv1x1(in_channels=mid_channels, out_channels=
out_channels, bias=True)
self.flatten = Flatten()
def forward(self, x):
y = self.down_conv(x)
y = self.activ(y)
z = self.tidy_conv(y)
z = self.flatten(z)
return z, y
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'stride': 1}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 512
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 4 * x2 + 36 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 512
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 128
y1 = yindex // 128
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 128 * x2 + 2048 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x2 + 16 * y3), tmp4, xmask & ymask)
tl.store(out_ptr1 + (y0 + 128 * x2 + 2048 * y1), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_3(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (128, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (128,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((128, 4, 3, 3), (36, 1, 12, 4), torch.float32
)
get_raw_stream(0)
triton_poi_fused_0[grid(512, 9)](primals_1, buf0, 512, 9, XBLOCK=16,
YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
triton_poi_fused_1[grid(16, 16)](primals_3, buf1, 16, 16, XBLOCK=16,
YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 128, 4, 4), (2048, 1, 512, 128))
buf3 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.
float32)
buf4 = empty_strided_cuda((4, 128, 4, 4), (2048, 1, 512, 128),
torch.float32)
triton_poi_fused_convolution_relu_2[grid(512, 16)](buf2, primals_2,
buf3, buf4, 512, 16, XBLOCK=16, YBLOCK=64, num_warps=4,
num_stages=1)
del buf2
del primals_2
buf5 = extern_kernels.convolution(buf4, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 4, 4), (64, 1, 16, 4))
del buf4
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_convolution_3[grid(16, 16)](buf5, primals_5, buf6,
16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del buf5
del primals_5
return reinterpret_tensor(buf6, (4, 64), (64, 1), 0
), buf3, buf0, buf1, primals_4, buf3
def conv1x1(in_channels, out_channels, stride=1, groups=1, bias=False):
"""
Convolution 1x1 layer.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
"""
return nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
kernel_size=1, stride=stride, groups=groups, bias=bias)
def conv3x3(in_channels, out_channels, stride=1, padding=1, dilation=1,
groups=1, bias=False):
"""
Convolution 3x3 layer.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for convolution layer.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
"""
return nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
kernel_size=3, stride=stride, padding=padding, dilation=dilation,
groups=groups, bias=bias)
class Flatten(nn.Module):
"""
Simple flatten module.
"""
def forward(self, x):
return x.view(x.size(0), -1)
class NavigatorBranchNew(nn.Module):
"""
Navigator branch block for Navigator unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
"""
def __init__(self, in_channels, out_channels, stride):
super(NavigatorBranchNew, self).__init__()
mid_channels = 128
self.down_conv = conv3x3(in_channels=in_channels, out_channels=
mid_channels, stride=stride, bias=True)
self.activ = nn.ReLU(inplace=False)
self.tidy_conv = conv1x1(in_channels=mid_channels, out_channels=
out_channels, bias=True)
self.flatten = Flatten()
def forward(self, input_0):
primals_1 = self.down_conv.weight
primals_2 = self.down_conv.bias
primals_4 = self.tidy_conv.weight
primals_5 = self.tidy_conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1]
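def _example_usage():
    # Hedged usage sketch, not part of the original repo: call() asserts a
    # (4, 4, 4, 4) input with in_channels=4, out_channels=4, stride=1, matching
    # the source get_init_inputs(); the CUDA device is assumed.
    branch = NavigatorBranchNew(in_channels=4, out_channels=4, stride=1).cuda()
    z, y = branch(torch.rand(4, 4, 4, 4, device='cuda'))
    return z.shape, y.shape  # (4, 64) flattened scores, (4, 128, 4, 4) features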
|
iofthetiger/pkuad
|
NavigatorBranch
| false
| 6,905
|
[
"Apache-2.0"
] | 1
|
07496d108c614c84be028f344830becc9cac8fe5
|
https://github.com/iofthetiger/pkuad/tree/07496d108c614c84be028f344830becc9cac8fe5
|
ISub
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/tk/ctkgmfmzjuitsqnampr3rgu2evmz4dngqwytnh7daxge4todv4py.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.sub]
# Source node to ATen node mapping:
# x => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %copy_ : [num_users=1] = call_function[target=torch.ops.aten.copy_.default](args = (%arg0_1, %sub), kwargs = {})
triton_poi_fused_sub_0 = async_compile.triton('triton_poi_fused_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sub_0', 'mutated_arg_names': ['in_ptr0', 'out_ptr1'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sub_0(in_ptr0, in_ptr1, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tmp0 - tmp1
tl.store(out_ptr1 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_sub_0.run(arg0_1, arg1_1, arg0_1, 256, grid=grid(256), stream=stream0)
del arg1_1
return (arg0_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
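# Elementwise kernel: loads both inputs and stores in_ptr0 - in_ptr1. The wrapper
# below passes the first argument as out_ptr1 as well, so the first input is
# mutated in place, mirroring the aten.copy_ in the AOT graph above.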
@triton.jit
def triton_poi_fused_sub_0(in_ptr0, in_ptr1, out_ptr1, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 - tmp1
tl.store(out_ptr1 + x0, tmp2, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
get_raw_stream(0)
triton_poi_fused_sub_0[grid(256)](arg0_1, arg1_1, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg1_1
return arg0_1,
class ISubNew(torch.nn.Module):
def __init__(self):
super(ISubNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
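# Minimal usage sketch (added for illustration, not part of the original dump).
# Assumes a CUDA device and the contiguous (4, 4, 4, 4) float32 inputs that
# `call` asserts above.
if __name__ == "__main__":
    m = ISubNew()
    a = torch.rand(4, 4, 4, 4, device='cuda')
    b = torch.rand(4, 4, 4, 4, device='cuda')
    out = m(a, b)
    # The fused kernel writes a - b back into the first argument, so `out` is `a`.
    assert out is a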
|
Ilyabasharov/torch2trt
|
ISub
| false
| 2,545
|
[
"MIT"
] | 0
|
76bf298b3da408509665e23e2494922b131afb10
|
https://github.com/Ilyabasharov/torch2trt/tree/76bf298b3da408509665e23e2494922b131afb10
|
GRUStep
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/c4/cc4khg7fwbxxm2fufox7nnkf4gfybrmj5ir2tx3zuxfioc5b2dya.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/5b/c5bkw2jxfpnk3o5xqifvptqcde6oukvmpsxncnrr4hbmq6dbwwvm.py
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_2 => cat_2
# Graph fragment:
# %cat_2 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mul, %primals_2], -1), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.sigmoid(tmp5)
tmp7 = tl.load(in_ptr1 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 * tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tmp12 = tl.full([1], 8, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.load(in_ptr2 + ((4*x1) + ((-4) + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tl.where(tmp4, tmp10, tmp14)
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/tn/ctn3hks2zjt4xmjlguifn25zx6whcel77cdlsr33hdq2oyumfsoc.py
# Topologically Sorted Source Nodes: [z, t, sub, mul_1, mul_2, h_state], Original ATen: [aten.sigmoid, aten.tanh, aten.rsub, aten.mul, aten.add]
# Source node to ATen node mapping:
# h_state => add
# mul_1 => mul_1
# mul_2 => mul_2
# sub => sub
# t => tanh
# z => sigmoid
# Graph fragment:
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_5,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %tanh), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_2), kwargs = {})
triton_poi_fused_add_mul_rsub_sigmoid_tanh_2 = async_compile.triton('triton_poi_fused_add_mul_rsub_sigmoid_tanh_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_sigmoid_tanh_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp4 = tl.load(in_ptr1 + (x0), xmask)
tmp6 = tl.load(in_ptr2 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp5 = tmp3 * tmp4
tmp7 = libdevice.tanh(tmp6)
tmp8 = tmp1 * tmp7
tmp9 = tmp5 + tmp8
tl.store(out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, 8), (8, 1))
assert_size_stride(primals_5, (4, 8), (8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 512, grid=grid(512), stream=stream0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(buf2, primals_1, primals_2, buf3, 512, grid=grid(512), stream=stream0)
del primals_2
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf3, (64, 8), (8, 1), 0), reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [z, t, sub, mul_1, mul_2, h_state], Original ATen: [aten.sigmoid, aten.tanh, aten.rsub, aten.mul, aten.add]
triton_poi_fused_add_mul_rsub_sigmoid_tanh_2.run(buf1, primals_1, buf4, buf5, 256, grid=grid(256), stream=stream0)
return (buf5, primals_1, reinterpret_tensor(buf0, (64, 8), (8, 1), 0), buf1, buf2, reinterpret_tensor(buf3, (64, 8), (8, 1), 0), buf4, primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
import torch.multiprocessing
import torch.nn.modules.loss
from scipy.sparse import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
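# The three kernels below mirror the AOT graph above: concatenate [h, x],
# concatenate [sigmoid(r) * h, x], and compute the fused GRU update
# (1 - sigmoid(z)) * h + sigmoid(z) * tanh(t).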
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.sigmoid(tmp5)
tmp7 = tl.load(in_ptr1 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp8 = tmp6 * tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp14 = tl.load(in_ptr2 + (4 * x1 + (-4 + x0)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp15 = tl.where(tmp4, tmp10, tmp14)
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + x0, xmask)
tmp6 = tl.load(in_ptr2 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp5 = tmp3 * tmp4
tmp7 = libdevice.tanh(tmp6)
tmp8 = tmp1 * tmp7
tmp9 = tmp5 + tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, 8), (8, 1))
assert_size_stride(primals_5, (4, 8), (8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512,
XBLOCK=256, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
triton_poi_fused_cat_1[grid(512)](buf2, primals_1, primals_2, buf3,
512, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_rsub_sigmoid_tanh_2[grid(256)](buf1,
primals_1, buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1)
return buf5, primals_1, reinterpret_tensor(buf0, (64, 8), (8, 1), 0
), buf1, buf2, reinterpret_tensor(buf3, (64, 8), (8, 1), 0
), buf4, primals_5
class GRUStepNew(nn.Module):
def __init__(self, hidden_size, input_size):
super(GRUStepNew, self).__init__()
"""GRU module"""
self.linear_z = nn.Linear(hidden_size + input_size, hidden_size,
bias=False)
self.linear_r = nn.Linear(hidden_size + input_size, hidden_size,
bias=False)
self.linear_t = nn.Linear(hidden_size + input_size, hidden_size,
bias=False)
def forward(self, input_0, input_1):
primals_3 = self.linear_z.weight
primals_4 = self.linear_r.weight
primals_5 = self.linear_t.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
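# Minimal usage sketch (added for illustration, not part of the original dump).
# hidden_size=4 and input_size=4 match the (4, 8) weight shapes asserted in `call`.
if __name__ == "__main__":
    gru = GRUStepNew(hidden_size=4, input_size=4).cuda()
    h = torch.rand(4, 4, 4, 4, device='cuda')
    x = torch.rand(4, 4, 4, 4, device='cuda')
    # Standard GRU update (1 - z) * h + z * t, run as three Triton kernels plus three matmuls.
    h_next = gru(h, x)
    assert h_next.shape == h.shape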
|
LucasAPayne/graph4nlp
|
GRUStep
| false
| 9,695
|
[
"Apache-2.0"
] | 0
|
3b72308f6ed9ce04c535f78b4b21b6ae0a8f5421
|
https://github.com/LucasAPayne/graph4nlp/tree/3b72308f6ed9ce04c535f78b4b21b6ae0a8f5421
|
policy1
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/7w/c7wzk6yj5mo2xrambdrq7gwfpmi54aba3fjc2wja3furjpct7zbl.py
# Topologically Sorted Source Nodes: [mu], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# mu => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%primals_1, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_0 = async_compile.triton('triton_per_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 3
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), rmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp11, rmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, = args
args.clear()
assert_size_stride(primals_1, (3, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((3, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [mu], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_per_fused__softmax_0.run(primals_1, buf2, 1, 3, grid=grid(1), stream=stream0)
del primals_1
return (buf2, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
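# Numerically stable softmax over the 3-element parameter: subtract the max,
# exponentiate, and normalise by the sum, all in one persistent-reduction kernel.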
@triton.jit
def triton_per_fused__softmax_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
rnumel = 3
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, rmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp11, rmask)
def call(args):
primals_1, = args
args.clear()
assert_size_stride(primals_1, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((3,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused__softmax_0[grid(1)](primals_1, buf2, 1, 3, XBLOCK=
1, num_warps=2, num_stages=1)
del primals_1
return buf2, buf2
class policy1New(nn.Module):
def __init__(self):
super(policy1New, self).__init__()
self.sm = nn.Softmax(dim=-1)
self.actor = nn.Parameter(torch.FloatTensor([-0.35, 0.4, 1]))
def forward(self):
primals_1 = self.actor
output = call([primals_1])
return output[0]
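# Minimal usage sketch (added for illustration, not part of the original dump).
if __name__ == "__main__":
    policy = policy1New().cuda()
    mu = policy()  # softmax over the 3-element `actor` parameter
    assert abs(mu.sum().item() - 1.0) < 1e-5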
|
JWongDude/FruitLoops
|
policy1
| false
| 11,527
|
[
"MIT"
] | 0
|
f4346d9db16ba619d71ce5bb819f5da08a88a120
|
https://github.com/JWongDude/FruitLoops/tree/f4346d9db16ba619d71ce5bb819f5da08a88a120
|
BalancedL1Loss
|
import functools
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
loss = loss * weight
if avg_factor is None:
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
loss = loss.sum() / avg_factor
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, avg_factor=2)
tensor(1.5000)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction='mean', avg_factor=
None, **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
@weighted_loss
def balanced_l1_loss(pred, target, beta=1.0, alpha=0.5, gamma=1.5,
reduction='mean'):
assert beta > 0
assert pred.size() == target.size() and target.numel() > 0
diff = torch.abs(pred - target)
b = np.e ** (gamma / alpha) - 1
loss = torch.where(diff < beta, alpha / b * (b * diff + 1) * torch.log(
b * diff / beta + 1) - alpha * diff, gamma * diff + gamma / b -
alpha * beta)
return loss
class BalancedL1Loss(nn.Module):
"""Balanced L1 Loss
arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)
"""
def __init__(self, alpha=0.5, gamma=1.5, beta=1.0, reduction='mean',
loss_weight=1.0):
super(BalancedL1Loss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self, pred, target, weight=None, avg_factor=None,
reduction_override=None, **kwargs):
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (reduction_override if reduction_override else self.
reduction)
loss_bbox = self.loss_weight * balanced_l1_loss(pred, target,
weight, alpha=self.alpha, gamma=self.gamma, beta=self.beta,
reduction=reduction, avg_factor=avg_factor, **kwargs)
return loss_bbox
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import functools
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
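# Fused balanced L1 loss with mean reduction. The literal constants below are the
# defaults folded in at compile time: b = e**(gamma/alpha) - 1 ≈ 19.0855,
# alpha/b ≈ 0.02620 and gamma/b ≈ 0.07859, with alpha=0.5, gamma=1.5, beta=1.0.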
@triton.jit
def triton_per_fused_abs_add_div_log_lt_mean_mul_sub_where_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 1.0
tmp5 = tmp3 < tmp4
tmp6 = 19.085536923187664
tmp7 = tmp3 * tmp6
tmp8 = tmp7 + tmp4
tmp9 = 0.02619784824562798
tmp10 = tmp8 * tmp9
tmp11 = tmp7 * tmp4
tmp12 = tmp11 + tmp4
tmp13 = tl_math.log(tmp12)
tmp14 = tmp10 * tmp13
tmp15 = 0.5
tmp16 = tmp3 * tmp15
tmp17 = tmp14 - tmp16
tmp18 = 1.5
tmp19 = tmp3 * tmp18
tmp20 = 0.07859354473688394
tmp21 = tmp19 + tmp20
tmp22 = tmp21 - tmp15
tmp23 = tl.where(tmp5, tmp17, tmp22)
tmp24 = tl.broadcast_to(tmp23, [RBLOCK])
tmp26 = triton_helpers.promote_to_tensor(tl.sum(tmp24, 0))
tmp27 = 256.0
tmp28 = tmp26 / tmp27
tmp29 = tmp28 * tmp4
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp29, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_add_div_log_lt_mean_mul_sub_where_0[grid(1)](buf1,
arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
loss = loss * weight
if avg_factor is None:
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
loss = loss.sum() / avg_factor
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, avg_factor=2)
tensor(1.5000)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction='mean', avg_factor=
None, **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
@weighted_loss
def balanced_l1_loss(pred, target, beta=1.0, alpha=0.5, gamma=1.5,
reduction='mean'):
assert beta > 0
assert pred.size() == target.size() and target.numel() > 0
diff = torch.abs(pred - target)
b = np.e ** (gamma / alpha) - 1
loss = torch.where(diff < beta, alpha / b * (b * diff + 1) * torch.log(
b * diff / beta + 1) - alpha * diff, gamma * diff + gamma / b -
alpha * beta)
return loss
class BalancedL1LossNew(nn.Module):
"""Balanced L1 Loss
arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)
"""
def __init__(self, alpha=0.5, gamma=1.5, beta=1.0, reduction='mean',
loss_weight=1.0):
super(BalancedL1LossNew, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
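# Minimal usage sketch (added for illustration, not part of the original dump).
# The kernel bakes in the default alpha=0.5, gamma=1.5, beta=1.0 and mean
# reduction, so it should match the eager `balanced_l1_loss` reference above.
if __name__ == "__main__":
    loss_fn = BalancedL1LossNew()
    pred = torch.rand(4, 4, 4, 4, device='cuda')
    target = torch.rand(4, 4, 4, 4, device='cuda')
    fused = loss_fn(pred, target)
    eager = loss_fn.loss_weight * balanced_l1_loss(pred, target)
    assert torch.allclose(fused, eager, atol=1e-5)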
|
AtticusJohnson/mmdetection
|
BalancedL1Loss
| false
| 11,206
|
[
"Apache-2.0"
] | 0
|
d8d89bafcce13d3b32b1fb3366be3bb9830546c2
|
https://github.com/AtticusJohnson/mmdetection/tree/d8d89bafcce13d3b32b1fb3366be3bb9830546c2
|
RobertaClassificationHead
|
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.utils.data
import torch.nn
class RobertaClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super(RobertaClassificationHead, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, features, **kwargs):
x = features[:, 0, :]
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, hidden_dropout_prob=
0.5, num_labels=4)}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
triton_poi_fused_tanh_1[grid(64)](buf2, primals_3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_5
return reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf2, primals_4
class RobertaClassificationHeadNew(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super(RobertaClassificationHeadNew, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, input_0):
primals_2 = self.dense.weight
primals_3 = self.dense.bias
primals_4 = self.out_proj.weight
primals_5 = self.out_proj.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
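# Minimal usage sketch (added for illustration, not part of the original dump).
# The config object is a stand-in for the `_mock_config` used by the eager
# reference; note that dropout does not appear in the traced `call` graph above.
if __name__ == "__main__":
    from types import SimpleNamespace
    cfg = SimpleNamespace(hidden_size=4, hidden_dropout_prob=0.5, num_labels=4)
    head = RobertaClassificationHeadNew(cfg).cuda()
    features = torch.rand(4, 4, 4, 4, device='cuda')
    logits = head(features)  # dense -> tanh -> out_proj applied to features[:, 0, :]
    assert logits.shape == (4, 4, 4)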
|
HeartForNlp/VL-BERT
|
RobertaClassificationHead
| false
| 1,886
|
[
"MIT"
] | 0
|
c1a590e2597b592629329db126cf8eae74b49cc0
|
https://github.com/HeartForNlp/VL-BERT/tree/c1a590e2597b592629329db126cf8eae74b49cc0
|
EnchanceReLU
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/lg/clgi5ub4djfnpjodb6m3eg6zyenzeobobic6z3zm4p5dim3iasau.py
# Topologically Sorted Source Nodes: [x, x_1, x_2], Original ATen: [aten.add, aten.relu, aten.sub]
# Source node to ATen node mapping:
# x => add
# x_1 => relu
# x_2 => sub
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 4), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu, 4), kwargs = {})
triton_poi_fused_add_relu_sub_0 = async_compile.triton('triton_poi_fused_add_relu_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_relu_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 4.0
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tmp4 - tmp1
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, x_1, x_2], Original ATen: [aten.add, aten.relu, aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_add_relu_sub_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_relu_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 4.0
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tmp4 - tmp1
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_relu_sub_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class EnchanceReLUNew(nn.ReLU):
def __init__(self, args):
super(EnchanceReLUNew, self).__init__(inplace=True)
self.shift = getattr(args, 'fm_boundary', 0.25)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
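# Minimal usage sketch (added for illustration, not part of the original dump).
# The compiled kernel computes relu(x + 4) - 4: the shift constant was folded in
# at trace time, so `self.shift` set here is not read by the fused path.
if __name__ == "__main__":
    from types import SimpleNamespace
    act = EnchanceReLUNew(SimpleNamespace(fm_boundary=0.25))
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = act(x)
    assert torch.allclose(y, torch.relu(x + 4.0) - 4.0)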
|
XiaotaoChen/model-quantization
|
EnchanceReLU
| false
| 14,605
|
[
"BSD-2-Clause"
] | 66
|
a745ef691e9329b9c973a2dd795761cd3da8b6ae
|
https://github.com/XiaotaoChen/model-quantization/tree/a745ef691e9329b9c973a2dd795761cd3da8b6ae
|
SharedLinear
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class SharedLinear(nn.Linear):
def __init__(self, in_features, out_features, share_weight=False):
super(SharedLinear, self).__init__(in_features, out_features, bias=True
)
if share_weight:
self.weight = nn.Parameter(torch.Tensor(1, in_features))
self.reset_parameters()
def forward(self, x):
return F.linear(x, self.weight) + self.bias
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](buf1, primals_3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_3
return buf1, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0)
class SharedLinearNew(nn.Linear):
def __init__(self, in_features, out_features, share_weight=False):
super(SharedLinearNew, self).__init__(in_features, out_features,
bias=True)
if share_weight:
self.weight = nn.Parameter(torch.Tensor(1, in_features))
self.reset_parameters()
def forward(self, input_0):
primals_1 = self.weight
primals_3 = self.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
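# Minimal usage sketch (added for illustration, not part of the original dump).
if __name__ == "__main__":
    layer = SharedLinearNew(4, 4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = layer(x)  # F.linear(x, weight) + bias, with the bias add fused into one kernel
    assert y.shape == (4, 4, 4, 4)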
|
sdw95927/deconvGAN
|
SharedLinear
| false
| 12,954
|
[
"MIT"
] | 0
|
49dbbfe4827ed8366242870877165482d4ec1e75
|
https://github.com/sdw95927/deconvGAN/tree/49dbbfe4827ed8366242870877165482d4ec1e75
|
SE_Connect
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/jn/cjnv5uptstyk4xaisuiw5kf5lbz3m33meejxhbfbsta5ozps7ijn.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# out => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [2]), kwargs = {})
triton_poi_fused_mean_0 = async_compile.triton('triton_poi_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ip/cip355a7nnydvbbk53yzzlgfxtclbx4sdaz6diiadsc7bk4g3ikp.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = 0.0
tmp7 = tmp5 <= tmp6
tl.store(in_out_ptr0 + (x0), tmp5, xmask)
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/cm/ccmo3ssy4it32zgmnziqn6cih5z7bew4voyzb4nxpajh2zquk7fp.py
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# out_3 => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %unsqueeze), kwargs = {})
triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + (x3), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4), (4, 1))
assert_size_stride(primals_3, (1, ), (1, ))
assert_size_stride(primals_4, (4, 1), (1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_poi_fused_mean_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0)
buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 1), (4, 1, 1), 0); del buf1 # reuse
buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf2, primals_3, buf5, 16, grid=grid(16), stream=stream0)
del primals_3
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (16, 1), (1, 0), 0), reinterpret_tensor(primals_4, (1, 4), (1, 1), 0), alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.mul]
triton_poi_fused_mul_2.run(primals_1, buf3, buf4, 256, grid=grid(256), stream=stream0)
return (buf4, primals_1, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf2, (16, 1), (1, 1), 0), buf3, primals_4, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = 0.0
tmp7 = tmp5 <= tmp6
tl.store(in_out_ptr0 + x0, tmp5, xmask)
tl.store(out_ptr0 + x0, tmp7, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x3, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4), (4, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (4, 1), (1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 1), (4, 1, 1), 0)
del buf1
buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(16)](buf2,
primals_3, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (16, 1), (
1, 0), 0), reinterpret_tensor(primals_4, (1, 4), (1, 1), 0),
alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_2[grid(256)](primals_1, buf3, buf4, 256,
XBLOCK=256, num_warps=4, num_stages=1)
return buf4, primals_1, reinterpret_tensor(buf0, (16, 4), (4, 1), 0
), reinterpret_tensor(buf2, (16, 1), (1, 1), 0), buf3, primals_4, buf5
class SE_ConnectNew(nn.Module):
def __init__(self, channels, s=4):
super().__init__()
        assert channels % s == 0, '{} % {} != 0'.format(channels, s)
self.linear1 = nn.Linear(channels, channels // s)
self.linear2 = nn.Linear(channels // s, channels)
def forward(self, input_0):
primals_2 = self.linear1.weight
primals_3 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
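# A minimal eager-mode sketch of what the three fused kernels above compute for
# SE_Connect: squeeze the time axis with a mean, run the bottleneck MLP, and apply a
# sigmoid gate back onto the input. The function name and the (batch, channels, time)
# layout are illustrative assumptions, not code taken from the repository.
def se_connect_reference(x, linear1, linear2):
    out = x.mean(dim=2)                     # triton_poi_fused_mean_0: average over dim 2
    out = torch.relu(linear1(out))          # triton_poi_fused_relu_threshold_backward_1: bias add + ReLU
    scale = torch.sigmoid(linear2(out))     # sigmoid is folded into triton_poi_fused_mul_2
    return x * scale.unsqueeze(dim=2)       # per-channel gating of the original input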
|
ishine/asv-subtools | SE_Connect | false | 15,644 | ["Apache-2.0"] | 370 | 597dcb29a772b8113dbe7ab64f0d4cc1da298707 | https://github.com/ishine/asv-subtools/tree/597dcb29a772b8113dbe7ab64f0d4cc1da298707 |
AGRUCell
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import *
class AGRUCell(nn.Module):
""" Attention based GRU (AGRU)
Reference:
- Deep Interest Evolution Network for Click-Through Rate Prediction[J]. arXiv preprint arXiv:1809.03672, 2018.
"""
def __init__(self, input_size, hidden_size, bias=True):
super(AGRUCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.weight_ih = nn.Parameter(torch.Tensor(3 * hidden_size, input_size)
)
self.register_parameter('weight_ih', self.weight_ih)
self.weight_hh = nn.Parameter(torch.Tensor(3 * hidden_size,
hidden_size))
self.register_parameter('weight_hh', self.weight_hh)
if bias:
self.bias_ih = nn.Parameter(torch.Tensor(3 * hidden_size))
self.register_parameter('bias_ih', self.bias_ih)
self.bias_hh = nn.Parameter(torch.Tensor(3 * hidden_size))
self.register_parameter('bias_hh', self.bias_hh)
for tensor in [self.bias_ih, self.bias_hh]:
nn.init.zeros_(tensor)
else:
self.register_parameter('bias_ih', None)
self.register_parameter('bias_hh', None)
def forward(self, input, hx, att_score):
gi = F.linear(input, self.weight_ih, self.bias_ih)
gh = F.linear(hx, self.weight_hh, self.bias_hh)
i_r, _i_z, i_n = gi.chunk(3, 1)
h_r, _h_z, h_n = gh.chunk(3, 1)
reset_gate = torch.sigmoid(i_r + h_r)
new_state = torch.tanh(i_n + reset_gate * h_n)
att_score = att_score.view(-1, 1)
hy = (1.0 - att_score) * hx + att_score * new_state
return hy
def get_inputs():
return [torch.rand([16, 4]), torch.rand([16, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4}]
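# A minimal usage sketch (illustrative only, not part of the original repository): one AGRU
# step in which the attention score plays the role of the update gate. Shapes follow
# get_inputs() above; the weight initialization below is an assumption, since the module
# itself leaves its parameters uninitialized.
if __name__ == '__main__':
    cell = AGRUCell(input_size=4, hidden_size=4)
    for weight in (cell.weight_ih, cell.weight_hh):
        nn.init.xavier_uniform_(weight)
    x, hx, att_score = get_inputs()    # x: (16, 4), hx: (16, 4), att_score: (4, 4) -> viewed as (16, 1)
    hy = cell(x, hx, att_score)        # hy: (16, 4), a convex mix of hx and the candidate state
    print(hy.shape)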
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from sklearn.metrics import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_tanh_backward_0(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 12 * x1), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + 12 * x1), xmask)
tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr4 + x2, xmask)
tmp11 = tl.load(in_ptr0 + (8 + x0 + 12 * x1), xmask)
tmp12 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr2 + (8 + x0 + 12 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp7 = 1.0
tmp8 = tmp7 - tmp6
tmp10 = tmp8 * tmp9
tmp13 = tmp11 + tmp12
tmp15 = tmp5 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = libdevice.tanh(tmp16)
tmp18 = tmp6 * tmp17
tmp19 = tmp10 + tmp18
tmp20 = tmp17 * tmp17
tmp21 = tmp7 - tmp20
tl.store(out_ptr0 + x2, tmp5, xmask)
tl.store(out_ptr1 + x2, tmp19, xmask)
tl.store(out_ptr2 + x2, tmp21, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (12, 4), (4, 1))
assert_size_stride(primals_2, (12,), (1,))
assert_size_stride(primals_3, (16, 4), (4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (16, 4), (4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 12),
(1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.addmm(primals_5, primals_6, reinterpret_tensor(
primals_4, (4, 12), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_rsub_sigmoid_tanh_tanh_backward_0[grid(64)](
buf0, primals_2, buf1, primals_7, primals_6, buf2, buf3, buf4,
64, XBLOCK=64, num_warps=1, num_stages=1)
del buf0
del primals_2
return buf3, primals_3, primals_6, primals_7, reinterpret_tensor(buf1,
(16, 4), (12, 1), 8), buf2, buf4
class AGRUCellNew(nn.Module):
""" Attention based GRU (AGRU)
Reference:
- Deep Interest Evolution Network for Click-Through Rate Prediction[J]. arXiv preprint arXiv:1809.03672, 2018.
"""
def __init__(self, input_size, hidden_size, bias=True):
super(AGRUCellNew, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.weight_ih = nn.Parameter(torch.Tensor(3 * hidden_size, input_size)
)
self.register_parameter('weight_ih', self.weight_ih)
self.weight_hh = nn.Parameter(torch.Tensor(3 * hidden_size,
hidden_size))
self.register_parameter('weight_hh', self.weight_hh)
if bias:
self.bias_ih = nn.Parameter(torch.Tensor(3 * hidden_size))
self.register_parameter('bias_ih', self.bias_ih)
self.bias_hh = nn.Parameter(torch.Tensor(3 * hidden_size))
self.register_parameter('bias_hh', self.bias_hh)
for tensor in [self.bias_ih, self.bias_hh]:
nn.init.zeros_(tensor)
else:
self.register_parameter('bias_ih', None)
self.register_parameter('bias_hh', None)
def forward(self, input_0, input_1, input_2):
primals_1 = self.weight_ih
primals_4 = self.weight_hh
primals_2 = self.bias_ih
primals_5 = self.bias_hh
primals_3 = input_0
primals_6 = input_1
primals_7 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
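# A small equivalence sketch, stated as an assumption rather than a guarantee: with shared
# weights, AGRUCellNew should reproduce the eager AGRUCell defined in the reference code
# above, since the Triton kernel only fuses the gate arithmetic that follows the two matmuls.
# The helper name is hypothetical, and the eager class is passed in rather than imported here.
def _check_agru_equivalence(eager_cell_cls):
    eager = eager_cell_cls(4, 4).cuda()
    fused = AGRUCellNew(4, 4).cuda()
    for param in eager.parameters():
        nn.init.uniform_(param, -0.1, 0.1)
    fused.load_state_dict(eager.state_dict())
    x = torch.rand(16, 4, device='cuda')
    hx = torch.rand(16, 4, device='cuda')
    att_score = torch.rand(4, 4, device='cuda')
    return torch.allclose(eager(x, hx, att_score), fused(x, hx, att_score), atol=1e-5)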
|
zzz123xyz/DeepCTR-Torch | AGRUCell | false | 4,748 | ["Apache-2.0"] | 0 | d6b880cc6b3761dbef90920a28182ef6737dd665 | https://github.com/zzz123xyz/DeepCTR-Torch/tree/d6b880cc6b3761dbef90920a28182ef6737dd665 |
CircleLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_1/inductor_cache/uk/cukuhlvspb6brmktd7ullkhxpd7nv43wkmlmuiylskqscjxuiyng.py
# Topologically Sorted Source Nodes: [add_2, an, sub_1, mul_2, logit_n, logsumexp, neg, add, add_1, ap, neg_1, sub, mul, logit_p, logsumexp_1, add_3, loss], Original ATen: [aten.add, aten.clamp_min, aten.sub, aten.mul, aten.logsumexp, aten.neg, aten.softplus]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# add_3 => add_5
# an => clamp_min_1
# ap => clamp_min
# logit_n => mul_3
# logit_p => mul_1
# logsumexp => abs_1, add_3, amax, eq, exp, full_default, log, sub_2, sum_1, where
# logsumexp_1 => abs_2, add_4, amax_1, eq_1, exp_1, full_default_1, log_1, sub_3, sum_2, where_1
# loss => div, exp_2, gt, log1p, mul_4, where_2
# mul => mul
# mul_2 => mul_2
# neg => neg
# neg_1 => neg_1
# sub => sub
# sub_1 => sub_1
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, 4), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add_2, 0.0), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, 4), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_min_1, %sub_1), kwargs = {})
# %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, 4), kwargs = {})
# %amax : [num_users=2] = call_function[target=torch.ops.aten.amax.default](args = (%mul_3, [0], True), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%amax,), kwargs = {})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%abs_1, inf), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %amax), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_3, %where), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0]), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%log, %squeeze), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg, 1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, 4), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add_1, 0.0), kwargs = {})
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%clamp_min,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, -3), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg_1, %sub), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 4), kwargs = {})
# %amax_1 : [num_users=2] = call_function[target=torch.ops.aten.amax.default](args = (%mul_1, [0], True), kwargs = {})
# %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%amax_1,), kwargs = {})
# %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%abs_2, inf), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq_1, %full_default_1, %amax_1), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %where_1), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_3,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [0]), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_2,), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%log_1, %squeeze_1), kwargs = {})
# %add_5 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %add_4), kwargs = {})
# %mul_4 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_5, 1.0), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%mul_4, 20.0), kwargs = {})
# %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_4,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp_2,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%log1p, 1.0), kwargs = {})
# %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_5, %div), kwargs = {})
triton_poi_fused_add_clamp_min_logsumexp_mul_neg_softplus_sub_0 = async_compile.triton('triton_poi_fused_add_clamp_min_logsumexp_mul_neg_softplus_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_min_logsumexp_mul_neg_softplus_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_min_logsumexp_mul_neg_softplus_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp8 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp15 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp22 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp44 = tl.load(in_ptr1 + (x0), xmask)
tmp55 = tl.load(in_ptr1 + (64 + x0), xmask)
tmp65 = tl.load(in_ptr1 + (128 + x0), xmask)
tmp75 = tl.load(in_ptr1 + (192 + x0), xmask)
tmp1 = 4.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = tmp0 - tmp1
tmp6 = tmp4 * tmp5
tmp7 = tmp6 * tmp1
tmp9 = tmp8 + tmp1
tmp10 = triton_helpers.maximum(tmp9, tmp3)
tmp11 = tmp8 - tmp1
tmp12 = tmp10 * tmp11
tmp13 = tmp12 * tmp1
tmp14 = triton_helpers.maximum(tmp7, tmp13)
tmp16 = tmp15 + tmp1
tmp17 = triton_helpers.maximum(tmp16, tmp3)
tmp18 = tmp15 - tmp1
tmp19 = tmp17 * tmp18
tmp20 = tmp19 * tmp1
tmp21 = triton_helpers.maximum(tmp14, tmp20)
tmp23 = tmp22 + tmp1
tmp24 = triton_helpers.maximum(tmp23, tmp3)
tmp25 = tmp22 - tmp1
tmp26 = tmp24 * tmp25
tmp27 = tmp26 * tmp1
tmp28 = triton_helpers.maximum(tmp21, tmp27)
tmp29 = tl_math.abs(tmp28)
tmp30 = float("inf")
tmp31 = tmp29 == tmp30
tmp32 = tl.where(tmp31, tmp3, tmp28)
tmp33 = tmp7 - tmp32
tmp34 = tl_math.exp(tmp33)
tmp35 = tmp13 - tmp32
tmp36 = tl_math.exp(tmp35)
tmp37 = tmp34 + tmp36
tmp38 = tmp20 - tmp32
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp37 + tmp39
tmp41 = tmp27 - tmp32
tmp42 = tl_math.exp(tmp41)
tmp43 = tmp40 + tmp42
tmp45 = -tmp44
tmp46 = 1.0
tmp47 = tmp45 + tmp46
tmp48 = tmp47 + tmp1
tmp49 = triton_helpers.maximum(tmp48, tmp3)
tmp50 = -tmp49
tmp51 = -3.0
tmp52 = tmp44 - tmp51
tmp53 = tmp50 * tmp52
tmp54 = tmp53 * tmp1
tmp56 = -tmp55
tmp57 = tmp56 + tmp46
tmp58 = tmp57 + tmp1
tmp59 = triton_helpers.maximum(tmp58, tmp3)
tmp60 = -tmp59
tmp61 = tmp55 - tmp51
tmp62 = tmp60 * tmp61
tmp63 = tmp62 * tmp1
tmp64 = triton_helpers.maximum(tmp54, tmp63)
tmp66 = -tmp65
tmp67 = tmp66 + tmp46
tmp68 = tmp67 + tmp1
tmp69 = triton_helpers.maximum(tmp68, tmp3)
tmp70 = -tmp69
tmp71 = tmp65 - tmp51
tmp72 = tmp70 * tmp71
tmp73 = tmp72 * tmp1
tmp74 = triton_helpers.maximum(tmp64, tmp73)
tmp76 = -tmp75
tmp77 = tmp76 + tmp46
tmp78 = tmp77 + tmp1
tmp79 = triton_helpers.maximum(tmp78, tmp3)
tmp80 = -tmp79
tmp81 = tmp75 - tmp51
tmp82 = tmp80 * tmp81
tmp83 = tmp82 * tmp1
tmp84 = triton_helpers.maximum(tmp74, tmp83)
tmp85 = tl_math.abs(tmp84)
tmp86 = tmp85 == tmp30
tmp87 = tl.where(tmp86, tmp3, tmp84)
tmp88 = tmp54 - tmp87
tmp89 = tl_math.exp(tmp88)
tmp90 = tmp63 - tmp87
tmp91 = tl_math.exp(tmp90)
tmp92 = tmp89 + tmp91
tmp93 = tmp73 - tmp87
tmp94 = tl_math.exp(tmp93)
tmp95 = tmp92 + tmp94
tmp96 = tmp83 - tmp87
tmp97 = tl_math.exp(tmp96)
tmp98 = tmp95 + tmp97
tmp99 = tl_math.log(tmp43)
tmp100 = tmp99 + tmp32
tmp101 = tl_math.log(tmp98)
tmp102 = tmp101 + tmp87
tmp103 = tmp100 + tmp102
tmp104 = tmp103 * tmp46
tmp105 = 20.0
tmp106 = tmp104 > tmp105
tmp107 = tl_math.exp(tmp104)
tmp108 = libdevice.log1p(tmp107)
tmp109 = tmp108 * tmp46
tmp110 = tl.where(tmp106, tmp103, tmp109)
tl.store(in_out_ptr0 + (x0), tmp110, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf5 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [add_2, an, sub_1, mul_2, logit_n, logsumexp, neg, add, add_1, ap, neg_1, sub, mul, logit_p, logsumexp_1, add_3, loss], Original ATen: [aten.add, aten.clamp_min, aten.sub, aten.mul, aten.logsumexp, aten.neg, aten.softplus]
stream0 = get_raw_stream(0)
triton_poi_fused_add_clamp_min_logsumexp_mul_neg_softplus_sub_0.run(buf5, arg1_1, arg0_1, 64, grid=grid(64), stream=stream0)
del arg0_1
del arg1_1
return (buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_clamp_min_logsumexp_mul_neg_softplus_sub_0(in_out_ptr0
, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp8 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp15 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp22 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp44 = tl.load(in_ptr1 + x0, xmask)
tmp55 = tl.load(in_ptr1 + (64 + x0), xmask)
tmp65 = tl.load(in_ptr1 + (128 + x0), xmask)
tmp75 = tl.load(in_ptr1 + (192 + x0), xmask)
tmp1 = 4.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = tmp0 - tmp1
tmp6 = tmp4 * tmp5
tmp7 = tmp6 * tmp1
tmp9 = tmp8 + tmp1
tmp10 = triton_helpers.maximum(tmp9, tmp3)
tmp11 = tmp8 - tmp1
tmp12 = tmp10 * tmp11
tmp13 = tmp12 * tmp1
tmp14 = triton_helpers.maximum(tmp7, tmp13)
tmp16 = tmp15 + tmp1
tmp17 = triton_helpers.maximum(tmp16, tmp3)
tmp18 = tmp15 - tmp1
tmp19 = tmp17 * tmp18
tmp20 = tmp19 * tmp1
tmp21 = triton_helpers.maximum(tmp14, tmp20)
tmp23 = tmp22 + tmp1
tmp24 = triton_helpers.maximum(tmp23, tmp3)
tmp25 = tmp22 - tmp1
tmp26 = tmp24 * tmp25
tmp27 = tmp26 * tmp1
tmp28 = triton_helpers.maximum(tmp21, tmp27)
tmp29 = tl_math.abs(tmp28)
tmp30 = float('inf')
tmp31 = tmp29 == tmp30
tmp32 = tl.where(tmp31, tmp3, tmp28)
tmp33 = tmp7 - tmp32
tmp34 = tl_math.exp(tmp33)
tmp35 = tmp13 - tmp32
tmp36 = tl_math.exp(tmp35)
tmp37 = tmp34 + tmp36
tmp38 = tmp20 - tmp32
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp37 + tmp39
tmp41 = tmp27 - tmp32
tmp42 = tl_math.exp(tmp41)
tmp43 = tmp40 + tmp42
tmp45 = -tmp44
tmp46 = 1.0
tmp47 = tmp45 + tmp46
tmp48 = tmp47 + tmp1
tmp49 = triton_helpers.maximum(tmp48, tmp3)
tmp50 = -tmp49
tmp51 = -3.0
tmp52 = tmp44 - tmp51
tmp53 = tmp50 * tmp52
tmp54 = tmp53 * tmp1
tmp56 = -tmp55
tmp57 = tmp56 + tmp46
tmp58 = tmp57 + tmp1
tmp59 = triton_helpers.maximum(tmp58, tmp3)
tmp60 = -tmp59
tmp61 = tmp55 - tmp51
tmp62 = tmp60 * tmp61
tmp63 = tmp62 * tmp1
tmp64 = triton_helpers.maximum(tmp54, tmp63)
tmp66 = -tmp65
tmp67 = tmp66 + tmp46
tmp68 = tmp67 + tmp1
tmp69 = triton_helpers.maximum(tmp68, tmp3)
tmp70 = -tmp69
tmp71 = tmp65 - tmp51
tmp72 = tmp70 * tmp71
tmp73 = tmp72 * tmp1
tmp74 = triton_helpers.maximum(tmp64, tmp73)
tmp76 = -tmp75
tmp77 = tmp76 + tmp46
tmp78 = tmp77 + tmp1
tmp79 = triton_helpers.maximum(tmp78, tmp3)
tmp80 = -tmp79
tmp81 = tmp75 - tmp51
tmp82 = tmp80 * tmp81
tmp83 = tmp82 * tmp1
tmp84 = triton_helpers.maximum(tmp74, tmp83)
tmp85 = tl_math.abs(tmp84)
tmp86 = tmp85 == tmp30
tmp87 = tl.where(tmp86, tmp3, tmp84)
tmp88 = tmp54 - tmp87
tmp89 = tl_math.exp(tmp88)
tmp90 = tmp63 - tmp87
tmp91 = tl_math.exp(tmp90)
tmp92 = tmp89 + tmp91
tmp93 = tmp73 - tmp87
tmp94 = tl_math.exp(tmp93)
tmp95 = tmp92 + tmp94
tmp96 = tmp83 - tmp87
tmp97 = tl_math.exp(tmp96)
tmp98 = tmp95 + tmp97
tmp99 = tl_math.log(tmp43)
tmp100 = tmp99 + tmp32
tmp101 = tl_math.log(tmp98)
tmp102 = tmp101 + tmp87
tmp103 = tmp100 + tmp102
tmp104 = tmp103 * tmp46
tmp105 = 20.0
tmp106 = tmp104 > tmp105
tmp107 = tl_math.exp(tmp104)
tmp108 = libdevice.log1p(tmp107)
tmp109 = tmp108 * tmp46
tmp110 = tl.where(tmp106, tmp103, tmp109)
tl.store(in_out_ptr0 + x0, tmp110, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf5 = buf2
del buf2
get_raw_stream(0)
triton_poi_fused_add_clamp_min_logsumexp_mul_neg_softplus_sub_0[grid
(64)](buf5, arg1_1, arg0_1, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del arg0_1
del arg1_1
return buf5,
class CircleLossNew(nn.Module):
    def __init__(self, m: 'float', gamma: 'float') -> None:
super(CircleLossNew, self).__init__()
self.m = m
self.gamma = gamma
self.soft_plus = nn.Softplus()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
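# A minimal eager-mode sketch of the computation performed by the single fused kernel above,
# written with explicit m and gamma (the trace bakes both in as constants). The function name
# is an illustrative assumption; it mirrors the clamp/margin/logsumexp/softplus structure
# described in the graph fragments of the generated code.
def circle_loss_reference(sp, sn, m, gamma):
    ap = torch.clamp_min(-sp + 1 + m, 0.0)    # clamp on the positive-pair margin
    an = torch.clamp_min(sn + m, 0.0)         # clamp on the negative-pair margin
    logit_p = -ap * (sp - (1 - m)) * gamma
    logit_n = an * (sn - m) * gamma
    return nn.functional.softplus(torch.logsumexp(logit_n, dim=0) + torch.logsumexp(logit_p, dim=0))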
|
HaochengWan/PVT | CircleLoss | false | 8,225 | ["MIT"] | 27 | 95818d303ee63084f044a057344b2049d1fa4492 | https://github.com/HaochengWan/PVT/tree/95818d303ee63084f044a057344b2049d1fa4492 |
BetaIntersection
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/zn/czn6cztle4peyy4pa7mkag53s34sjkn6wpenptu6ttfuvhgzzrup.py
# Topologically Sorted Source Nodes: [all_embeddings], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# all_embeddings => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/sy/csyq3f2s27ausr5srhudvxwlyubeknzzno766mzdsi5o67nwnhkp.py
# Topologically Sorted Source Nodes: [layer1_act], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# layer1_act => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/mn/cmnz3bkiktkuzaehydcnb4xqrevh5t4w2sxpzh66xoipqjrqkyk3.py
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [0], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/v7/cv7k2ug56ugsp7esaw5fg3kavfiqa2qhhcjb3iyrpese5p5qrst5.py
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_2/inductor_cache/l6/cl6syhmwqs3ibr5e4y34snaescnxfp75oxb5teuv3tcdenybambt.py
# Topologically Sorted Source Nodes: [mul, alpha_embedding, mul_1, beta_embedding], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# alpha_embedding => sum_2
# beta_embedding => sum_3
# mul => mul
# mul_1 => mul_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [0]), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_2), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [0]), kwargs = {})
triton_poi_fused_mul_sum_4 = async_compile.triton('triton_poi_fused_mul_sum_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp3 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp4 = tl.load(in_ptr1 + (64 + x0), xmask)
tmp7 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp8 = tl.load(in_ptr1 + (128 + x0), xmask)
tmp11 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp12 = tl.load(in_ptr1 + (192 + x0), xmask)
tmp15 = tl.load(in_ptr2 + (x0), xmask)
tmp17 = tl.load(in_ptr2 + (64 + x0), xmask)
tmp20 = tl.load(in_ptr2 + (128 + x0), xmask)
tmp23 = tl.load(in_ptr2 + (192 + x0), xmask)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp16 = tmp0 * tmp15
tmp18 = tmp3 * tmp17
tmp19 = tmp16 + tmp18
tmp21 = tmp7 * tmp20
tmp22 = tmp19 + tmp21
tmp24 = tmp11 * tmp23
tmp25 = tmp22 + tmp24
tl.store(out_ptr0 + (x0), tmp14, xmask)
tl.store(out_ptr1 + (x0), tmp25, xmask)
''', device_str='cuda')
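# A brief eager-mode sketch (an illustrative assumption, not the repository's source) of what
# the four kernels above implement: concatenate the alpha/beta embeddings, score them with a
# two-layer MLP, softmax the scores over the first axis, and take attention-weighted sums of
# both embeddings.
def beta_intersection_reference(alpha, beta, layer1, layer2):
    attention = torch.softmax(layer2(torch.relu(layer1(torch.cat([alpha, beta], dim=-1)))), dim=0)
    return (attention * alpha).sum(dim=0), (attention * beta).sum(dim=0)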
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (8, 8), (8, 1))
assert_size_stride(primals_4, (8, ), (1, ))
assert_size_stride(primals_5, (4, 8), (8, 1))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [all_embeddings], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 512, grid=grid(512), stream=stream0)
buf1 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 8), (1, 8), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 8), (128, 32, 8, 1), 0); del buf1 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [layer1_act], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf2, primals_4, buf8, 512, grid=grid(512), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, reinterpret_tensor(buf2, (64, 8), (8, 1), 0), reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf3)
del primals_6
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf3, buf4, 256, grid=grid(256), stream=stream0)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf4, buf5, 256, grid=grid(256), stream=stream0)
del buf4
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, alpha_embedding, mul_1, beta_embedding], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_4.run(buf5, primals_1, primals_2, buf6, buf7, 64, grid=grid(64), stream=stream0)
del buf5
return (buf6, buf7, primals_1, primals_2, reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(buf2, (64, 8), (8, 1), 0), buf3, primals_5, buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((8, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
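# Reference sketch (added for clarity, not part of the generated module): the two kernels
# above implement a numerically stable softmax over the leading dimension of the
# (4, 4, 4, 4) logits -- exp(x - max) in triton_poi_fused__softmax_2, followed by
# normalisation by the sum in triton_poi_fused__softmax_3.
def _softmax_reference(logits):
    # dim=0 mirrors the 64-element stride pattern used by the kernels above
    shifted = logits - logits.max(dim=0, keepdim=True).values
    weights = shifted.exp()
    return weights / weights.sum(dim=0, keepdim=True)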
@triton.jit
def triton_poi_fused_mul_sum_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp3 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp4 = tl.load(in_ptr1 + (64 + x0), xmask)
tmp7 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp8 = tl.load(in_ptr1 + (128 + x0), xmask)
tmp11 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp12 = tl.load(in_ptr1 + (192 + x0), xmask)
tmp15 = tl.load(in_ptr2 + x0, xmask)
tmp17 = tl.load(in_ptr2 + (64 + x0), xmask)
tmp20 = tl.load(in_ptr2 + (128 + x0), xmask)
tmp23 = tl.load(in_ptr2 + (192 + x0), xmask)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp16 = tmp0 * tmp15
tmp18 = tmp3 * tmp17
tmp19 = tmp16 + tmp18
tmp21 = tmp7 * tmp20
tmp22 = tmp19 + tmp21
tmp24 = tmp11 * tmp23
tmp25 = tmp22 + tmp24
tl.store(out_ptr0 + x0, tmp14, xmask)
tl.store(out_ptr1 + x0, tmp25, xmask)
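# Reference sketch (added commentary, an interpretation of the generated code):
# triton_poi_fused_mul_sum_4 above computes two attention-weighted reductions over the
# leading dimension, roughly out0 = (attention * a).sum(dim=0) and
# out1 = (attention * b).sum(dim=0), where attention is the softmax output and a, b are
# the original (4, 4, 4, 4) inputs.
def _weighted_sum_reference(attention, a, b):
    return (attention * a).sum(dim=0), (attention * b).sum(dim=0)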
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (8, 8), (8, 1))
assert_size_stride(primals_4, (8,), (1,))
assert_size_stride(primals_5, (4, 8), (8, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512,
XBLOCK=256, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_3, (8, 8), (1, 8), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 8), (128, 32, 8, 1), 0)
del buf1
buf8 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(512)](buf2,
primals_4, buf8, 512, XBLOCK=128, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(buf2, (64, 8), (
8, 1), 0), reinterpret_tensor(primals_5, (8, 4), (1, 8), 0),
alpha=1, beta=1, out=buf3)
del primals_6
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(256)](buf4, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf4
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_mul_sum_4[grid(64)](buf5, primals_1, primals_2,
buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf5
return buf6, buf7, primals_1, primals_2, reinterpret_tensor(buf0, (64,
8), (8, 1), 0), reinterpret_tensor(buf2, (64, 8), (8, 1), 0
), buf3, primals_5, buf8
class BetaIntersectionNew(nn.Module):
def __init__(self, dim):
super(BetaIntersectionNew, self).__init__()
self.dim = dim
self.layer1 = nn.Linear(2 * self.dim, 2 * self.dim)
self.layer2 = nn.Linear(2 * self.dim, self.dim)
nn.init.xavier_uniform_(self.layer1.weight)
nn.init.xavier_uniform_(self.layer2.weight)
def forward(self, input_0, input_1):
primals_3 = self.layer1.weight
primals_4 = self.layer1.bias
primals_5 = self.layer2.weight
primals_6 = self.layer2.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
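# Hypothetical usage sketch (not from the source repository): the compiled module expects
# two (4, 4, 4, 4) CUDA tensors and returns the attention-weighted alpha/beta embeddings,
# each of shape (4, 4, 4).
def _example_beta_intersection():
    model = BetaIntersectionNew(dim=4).cuda()
    alpha = torch.rand(4, 4, 4, 4, device='cuda')
    beta = torch.rand(4, 4, 4, 4, device='cuda')
    alpha_out, beta_out = model(alpha, beta)
    return alpha_out.shape, beta_out.shape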
|
HKUST-KnowComp/EFO-1-QA-benchmark
|
BetaIntersection
| false
| 17,373
|
[
"MIT"
] | 9
|
600fb02c76ab631f93ee362ceb789216ec085790
|
https://github.com/HKUST-KnowComp/EFO-1-QA-benchmark/tree/600fb02c76ab631f93ee362ceb789216ec085790
|
PatchMerging
|
import torch
import torch.nn as nn
from math import sqrt
import torch.nn.functional as F
class PatchMerging(nn.Module):
"""Patch Merging Layer.
Args:
input_resolution (tuple[int]): Resolution of input feature.
dim (int): Number of input channels.
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(4 * dim)
def forward(self, x):
""" Forward function.
Args:
x: Input feature, tensor size (B, H*W, C).
            H, W: Spatial resolution of the input feature, inferred from L = H*W (a square feature map is assumed).
"""
B, L, C = x.shape
H = int(sqrt(L))
W = H
x = x.view(B, H, W, C)
pad_input = H % 2 == 1 or W % 2 == 1
if pad_input:
x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2))
x0 = x[:, 0::2, 0::2, :]
x1 = x[:, 1::2, 0::2, :]
x2 = x[:, 0::2, 1::2, :]
x3 = x[:, 1::2, 1::2, :]
x = torch.cat([x0, x1, x2, x3], -1)
x = x.view(B, -1, 4 * C)
x = self.norm(x)
x = self.reduction(x)
return x
    def extra_repr(self) -> str:
return f'input_resolution={self.input_resolution}, dim={self.dim}'
def flops(self):
H, W = self.input_resolution
flops = H * W * self.dim
flops += H // 2 * (W // 2) * 4 * self.dim * 2 * self.dim
return flops
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_resolution': 4, 'dim': 4}]
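# Worked example (added sketch, not part of the original file): patch merging groups each
# 2x2 spatial neighbourhood into one token, quadrupling the channel count before the
# linear reduction halves it, so (B, H*W, C) becomes (B, H*W/4, 2*C).
def _patch_merging_example():
    layer = PatchMerging(input_resolution=(8, 8), dim=4)
    x = torch.rand(2, 64, 4)   # (B, H*W, C) with H = W = 8
    out = layer(x)             # (2, 16, 8): tokens divided by 4, channels doubled
    return out.shape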
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_cat_native_layer_norm_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr
):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp46 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last')
tmp48 = tl.load(in_ptr2 + r1, None, eviction_policy='evict_last')
tmp0 = r1
tl.full([1, 1], 0, tl.int64)
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (16 * x0 + r1), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1, 1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + (8 + 16 * x0 + (-4 + r1)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1, 1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + (4 + 16 * x0 + (-8 + r1)), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tl.full([1, 1], 16, tl.int64)
tmp19 = tl.load(in_ptr0 + (12 + 16 * x0 + (-12 + r1)), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK])
tl.where(xmask, tmp23, 0)
tmp26 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK])
tmp28 = tl.where(xmask, tmp26, 0)
tmp29 = tl.sum(tmp28, 1)[:, None]
tmp30 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp31 = tmp30.to(tl.float32)
tmp32 = tmp29 / tmp31
tmp33 = tmp23 - tmp32
tmp34 = tmp33 * tmp33
tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK])
tmp37 = tl.where(xmask, tmp35, 0)
tmp38 = tl.sum(tmp37, 1)[:, None]
tmp39 = 16.0
tmp40 = tmp38 / tmp39
tmp41 = 1e-05
tmp42 = tmp40 + tmp41
tmp43 = libdevice.rsqrt(tmp42)
tmp44 = tmp22 - tmp32
tmp45 = tmp44 * tmp43
tmp47 = tmp45 * tmp46
tmp49 = tmp47 + tmp48
tl.store(out_ptr0 + (r1 + 16 * x0), tmp22, xmask)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp43, xmask)
tl.store(out_ptr2 + (r1 + 16 * x0), tmp49, xmask)
tl.store(out_ptr1 + x0, tmp32, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (16,), (1,))
assert_size_stride(primals_4, (8, 16), (16, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1, 16), (16, 16, 16, 1), torch.float32
)
buf1 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
buf2 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf4 = reinterpret_tensor(buf2, (4, 1, 1), (1, 1, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 1, 16), (16, 16, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_cat_native_layer_norm_0[grid(4)](buf4, primals_1,
primals_2, primals_3, buf0, buf1, buf5, 4, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del primals_1
del primals_2
del primals_3
buf6 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (4, 16), (16, 1), 0),
reinterpret_tensor(primals_4, (16, 8), (1, 16), 0), out=buf6)
return reinterpret_tensor(buf6, (4, 1, 8), (8, 8, 1), 0
), buf0, buf1, buf4, reinterpret_tensor(buf5, (4, 16), (16, 1), 0
), primals_4
class PatchMergingNew(nn.Module):
"""Patch Merging Layer.
Args:
input_resolution (tuple[int]): Resolution of input feature.
dim (int): Number of input channels.
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(4 * dim)
    def extra_repr(self) -> str:
return f'input_resolution={self.input_resolution}, dim={self.dim}'
def flops(self):
H, W = self.input_resolution
flops = H * W * self.dim
flops += H // 2 * (W // 2) * 4 * self.dim * 2 * self.dim
return flops
def forward(self, input_0):
primals_4 = self.reduction.weight
primals_2 = self.norm.weight
primals_3 = self.norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
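# Hypothetical usage sketch (not from the source repository): the compiled variant is
# specialised for a (4, 4, 4) CUDA input, i.e. batch 4, a 2x2 spatial grid and dim 4,
# and returns a (4, 1, 8) tensor.
def _example_patch_merging_new():
    layer = PatchMergingNew(input_resolution=4, dim=4).cuda()
    x = torch.rand(4, 4, 4, device='cuda')
    return layer(x).shape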
|
rahulmangalampalli/esvit
|
PatchMerging
| false
| 12,918
|
[
"MIT"
] | 0
|
5caf6e36b088ae2e7aaa4100b307eec991078e3e
|
https://github.com/rahulmangalampalli/esvit/tree/5caf6e36b088ae2e7aaa4100b307eec991078e3e
|
ConvNet
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/pv/cpv7qykvsb2x3mhhybt3e54zyj7yf52qrhpen6orewcwoez2g3mx.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 1024
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = (yindex // 32)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (32*x2) + (800*y1)), tmp0, xmask)
''', device_str='cuda')
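# Note (added commentary, an interpretation of the generated code): triton_poi_fused_0 and
# the four similar kernels that follow appear to repack the contiguous 5x5 convolution
# weights into a layout with the input-channel dimension innermost (stride 1) before they
# are passed to extern_kernels.convolution in call() below.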
# kernel path: runs/run_shard_0/inductor_cache/af/cafxypwziobqgsujacsjlhl4vifehmfd4kjv6mmsqwsmu52bn4ve.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = (yindex // 32)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (32*x2) + (800*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ts/ctsxf36l57u3mq2ugcgebaybh3dyc2ufbhccjblzb2rb7pjushfr.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4096
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (1600*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/qw/cqwg3xdwx3eugpxibvknyiosqdjdt7h7peu75twynersbsv447ra.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8192
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (1600*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/23/c232cjjrpfm7piga4i2u2f6iwdslqscw63lwt2xylgovf64odkma.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16384
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = (yindex // 128)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (128*x2) + (3200*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/4h/c4hcobkjh5ndccnwn27fc3y3a45kthbpdekx4r7i2rmnirh3widu.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_5 = async_compile.triton('triton_poi_fused_convolution_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128, 1024], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_5(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 128
xnumel = 576
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = (yindex // 32)
tmp0 = tl.load(in_ptr0 + (x2 + (576*y3)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (y0 + (32*x2) + (18432*y1)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/lp/clpvz4wkxjigctsxh7jhtdveyv4cac4rp62cxpmaryo57tkuby3z.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten._prelu_kernel]
# Source node to ATen node mapping:
# x => gt, mul, where
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %convolution), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused__prelu_kernel_6 = async_compile.triton('triton_poi_fused__prelu_kernel_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__prelu_kernel_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 73728
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), None)
tmp3 = tl.load(in_ptr1 + (0))
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp5 = tmp4 * tmp0
tmp6 = tl.where(tmp2, tmp0, tmp5)
tl.store(out_ptr0 + (x0), tmp6, None)
''', device_str='cuda')
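# Reference sketch (added for clarity, not part of the generated module): the PReLU kernels
# in this file all evaluate the same elementwise rule with a single learned slope `a`,
# out = x if x > 0 else a * x, which the minimal helper below mirrors.
def _prelu_reference(x, a):
    return torch.where(x > 0, x, a * x)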
# kernel path: runs/run_shard_0/inductor_cache/h7/ch7qxrxvtfhpc4flqbdhjn6lyu25duxy6rqrd4ufpbdezerdqqa3.py
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten._prelu_kernel]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_1 => gt_1, mul_1, where_1
# Graph fragment:
# %convolution_1 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_5, %primals_6, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %convolution_1), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {})
triton_poi_fused__prelu_kernel_convolution_7 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_7(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 73728
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (0))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp7 = tmp6 * tmp2
tmp8 = tl.where(tmp4, tmp2, tmp7)
tl.store(in_out_ptr0 + (x2), tmp2, None)
tl.store(out_ptr0 + (x2), tmp8, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/cx/ccx74wlwwf7cb5yjcdq3ooqngkcyb27ev663ivoe6bet5sddq3c4.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_2 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_8 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 18432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32) % 12
x2 = (xindex // 384)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1) + (1536*x2)), None)
tmp1 = tl.load(in_ptr0 + (32 + x0 + (64*x1) + (1536*x2)), None)
tmp3 = tl.load(in_ptr0 + (768 + x0 + (64*x1) + (1536*x2)), None)
tmp5 = tl.load(in_ptr0 + (800 + x0 + (64*x1) + (1536*x2)), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3), tmp6, None)
tl.store(out_ptr1 + (x3), tmp16, None)
''', device_str='cuda')
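# Reference sketch (added commentary): the max-pool kernels in this file reduce each
# non-overlapping 2x2 window to its maximum and also record which of the four candidates
# won as a small int8 code (0..3). Up to that index encoding, this matches the helper
# below, which uses the standard PyTorch pooling call with return_indices=True.
def _maxpool_reference(x):
    return torch.nn.functional.max_pool2d(x, kernel_size=2, stride=2, return_indices=True)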
# kernel path: runs/run_shard_0/inductor_cache/lb/clbs5plq3l54kagj7xyoz4bjep23pwvvxay3m3nhjvzfteysjtlk.py
# Topologically Sorted Source Nodes: [conv2d_2, x_3], Original ATen: [aten.convolution, aten._prelu_kernel]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_3 => gt_2, mul_2, where_2
# Graph fragment:
# %convolution_2 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_8, %primals_9, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, %convolution_2), kwargs = {})
# %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_2, %mul_2), kwargs = {})
triton_poi_fused__prelu_kernel_convolution_9 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_9(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 36864
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (0))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp7 = tmp6 * tmp2
tmp8 = tl.where(tmp4, tmp2, tmp7)
tl.store(in_out_ptr0 + (x2), tmp2, None)
tl.store(out_ptr0 + (x2), tmp8, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/jb/cjb5dhwcpestwfavlatzw7rhg5pwgtoxv27zgkjahh7xtb2kiuoj.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_5 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_10 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 9216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = (xindex // 64) % 6
x2 = (xindex // 384)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (128*x1) + (1536*x2)), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (1536*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (768 + x0 + (128*x1) + (1536*x2)), xmask)
tmp5 = tl.load(in_ptr0 + (832 + x0 + (128*x1) + (1536*x2)), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3), tmp6, xmask)
tl.store(out_ptr1 + (x3), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/d3/cd3jr7trplj436qr2ltgnqe53j66334viwky4sltzir4rahlmv53.py
# Topologically Sorted Source Nodes: [conv2d_4, x_6], Original ATen: [aten.convolution, aten._prelu_kernel]
# Source node to ATen node mapping:
# conv2d_4 => convolution_4
# x_6 => gt_4, mul_4, where_4
# Graph fragment:
# %convolution_4 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_14, %primals_15, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_4 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_4, 0), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_4, %convolution_4), kwargs = {})
# %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_4, %convolution_4, %mul_4), kwargs = {})
triton_poi_fused__prelu_kernel_convolution_11 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_11(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 18432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (0))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp7 = tmp6 * tmp2
tmp8 = tl.where(tmp4, tmp2, tmp7)
tl.store(in_out_ptr0 + (x2), tmp2, None)
tl.store(out_ptr0 + (x2), tmp8, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/fc/cfc5r5uj2epaj3kwiyokvsydl4fpwcq6tzrvfzlhl3nmdpzkmuns.py
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_8 => _low_memory_max_pool2d_with_offsets_2, getitem_5
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_2 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%where_5, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_12 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 128], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 36
xnumel = 128
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 3
y1 = (yindex // 3)
y5 = yindex
y4 = (yindex // 9)
y6 = yindex % 9
tmp0 = tl.load(in_ptr0 + (x2 + (256*y0) + (1536*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (128 + x2 + (256*y0) + (1536*y1)), xmask & ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (768 + x2 + (256*y0) + (1536*y1)), xmask & ymask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (896 + x2 + (256*y0) + (1536*y1)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1, 1], 1, tl.int8)
tmp4 = tl.full([1, 1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1, 1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1, 1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2 + (128*y5)), tmp15, xmask & ymask)
tl.store(out_ptr1 + (y6 + (9*x2) + (1152*y4)), tmp16, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/xr/cxrbamhin4b5pl7ehs6gvheuobdvsywbxex23ck5t2wetzlfapk5.py
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten._prelu_kernel]
# Source node to ATen node mapping:
# x_10 => gt_6, mul_6, where_6
# Graph fragment:
# %gt_6 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%addmm, 0), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_7, %addmm), kwargs = {})
# %where_6 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %addmm, %mul_6), kwargs = {})
triton_poi_fused__prelu_kernel_13 = async_compile.triton('triton_poi_fused__prelu_kernel_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__prelu_kernel_13(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp3 = tl.load(in_ptr1 + (0))
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp5 = tmp4 * tmp0
tmp6 = tl.where(tmp2, tmp0, tmp5)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24 = args
args.clear()
assert_size_stride(primals_1, (32, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 1, 24, 24), (576, 576, 24, 1))
assert_size_stride(primals_4, (1, ), (1, ))
assert_size_stride(primals_5, (32, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_6, (32, ), (1, ))
assert_size_stride(primals_7, (1, ), (1, ))
assert_size_stride(primals_8, (64, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_9, (64, ), (1, ))
assert_size_stride(primals_10, (1, ), (1, ))
assert_size_stride(primals_11, (64, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_12, (64, ), (1, ))
assert_size_stride(primals_13, (1, ), (1, ))
assert_size_stride(primals_14, (128, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_15, (128, ), (1, ))
assert_size_stride(primals_16, (1, ), (1, ))
assert_size_stride(primals_17, (128, 128, 5, 5), (3200, 25, 5, 1))
assert_size_stride(primals_18, (128, ), (1, ))
assert_size_stride(primals_19, (1, ), (1, ))
assert_size_stride(primals_20, (2, 1152), (1152, 1))
assert_size_stride(primals_21, (2, ), (1, ))
assert_size_stride(primals_22, (1, ), (1, ))
assert_size_stride(primals_23, (4, 2), (2, 1))
assert_size_stride(primals_24, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((32, 32, 5, 5), (800, 1, 160, 32), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_5, buf0, 1024, 25, grid=grid(1024, 25), stream=stream0)
del primals_5
buf1 = empty_strided_cuda((64, 32, 5, 5), (800, 1, 160, 32), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_8, buf1, 2048, 25, grid=grid(2048, 25), stream=stream0)
del primals_8
buf2 = empty_strided_cuda((64, 64, 5, 5), (1600, 1, 320, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_11, buf2, 4096, 25, grid=grid(4096, 25), stream=stream0)
del primals_11
buf3 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_14, buf3, 8192, 25, grid=grid(8192, 25), stream=stream0)
del primals_14
buf4 = empty_strided_cuda((128, 128, 5, 5), (3200, 1, 640, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_17, buf4, 16384, 25, grid=grid(16384, 25), stream=stream0)
del primals_17
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 32, 24, 24), (18432, 576, 24, 1))
buf6 = empty_strided_cuda((4, 32, 24, 24), (18432, 1, 768, 32), torch.float32)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_5.run(buf5, primals_2, buf6, 128, 576, grid=grid(128, 576), stream=stream0)
del primals_2
buf7 = reinterpret_tensor(buf5, (4, 32, 24, 24), (18432, 1, 768, 32), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten._prelu_kernel]
triton_poi_fused__prelu_kernel_6.run(buf6, primals_4, buf7, 73728, grid=grid(73728), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, buf0, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 32, 24, 24), (18432, 1, 768, 32))
buf9 = buf8; del buf8 # reuse
buf10 = empty_strided_cuda((4, 32, 24, 24), (18432, 1, 768, 32), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_7.run(buf9, primals_6, primals_7, buf10, 73728, grid=grid(73728), stream=stream0)
del primals_6
buf11 = empty_strided_cuda((4, 32, 12, 12), (4608, 1, 384, 32), torch.float32)
buf12 = empty_strided_cuda((4, 32, 12, 12), (4608, 1, 384, 32), torch.int8)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_8.run(buf10, buf11, buf12, 18432, grid=grid(18432), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf13 = extern_kernels.convolution(buf11, buf1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 64, 12, 12), (9216, 1, 768, 64))
buf14 = buf13; del buf13 # reuse
buf15 = empty_strided_cuda((4, 64, 12, 12), (9216, 1, 768, 64), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_2, x_3], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_9.run(buf14, primals_9, primals_10, buf15, 36864, grid=grid(36864), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf15, buf2, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 64, 12, 12), (9216, 1, 768, 64))
buf17 = buf16; del buf16 # reuse
buf18 = empty_strided_cuda((4, 64, 12, 12), (9216, 1, 768, 64), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_3, x_4], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_9.run(buf17, primals_12, primals_13, buf18, 36864, grid=grid(36864), stream=stream0)
del primals_12
buf19 = empty_strided_cuda((4, 64, 6, 6), (2304, 1, 384, 64), torch.float32)
buf20 = empty_strided_cuda((4, 64, 6, 6), (2304, 1, 384, 64), torch.int8)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_10.run(buf18, buf19, buf20, 9216, grid=grid(9216), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf21 = extern_kernels.convolution(buf19, buf3, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 128, 6, 6), (4608, 1, 768, 128))
buf22 = buf21; del buf21 # reuse
buf23 = empty_strided_cuda((4, 128, 6, 6), (4608, 1, 768, 128), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_4, x_6], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_11.run(buf22, primals_15, primals_16, buf23, 18432, grid=grid(18432), stream=stream0)
del primals_15
# Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
buf24 = extern_kernels.convolution(buf23, buf4, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 128, 6, 6), (4608, 1, 768, 128))
buf25 = buf24; del buf24 # reuse
buf26 = empty_strided_cuda((4, 128, 6, 6), (4608, 1, 768, 128), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_5, x_7], Original ATen: [aten.convolution, aten._prelu_kernel]
triton_poi_fused__prelu_kernel_convolution_11.run(buf25, primals_18, primals_19, buf26, 18432, grid=grid(18432), stream=stream0)
del primals_18
buf27 = empty_strided_cuda((4, 128, 3, 3), (1152, 1, 384, 128), torch.int8)
buf28 = empty_strided_cuda((4, 128, 3, 3), (1152, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_12.run(buf26, buf27, buf28, 36, 128, grid=grid(36, 128), stream=stream0)
buf29 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_21, reinterpret_tensor(buf28, (4, 1152), (1152, 1), 0), reinterpret_tensor(primals_20, (1152, 2), (1, 1152), 0), alpha=1, beta=1, out=buf29)
del primals_21
buf30 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten._prelu_kernel]
triton_poi_fused__prelu_kernel_13.run(buf29, primals_22, buf30, 8, grid=grid(8), stream=stream0)
buf31 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_24, buf30, reinterpret_tensor(primals_23, (2, 4), (1, 2), 0), alpha=1, beta=1, out=buf31)
del primals_24
return (buf30, buf31, primals_1, primals_3, primals_4, buf0, primals_7, buf1, primals_10, buf2, primals_13, buf3, primals_16, buf4, primals_19, primals_22, buf6, buf7, buf9, buf10, buf11, buf12, buf14, buf15, buf17, buf18, buf19, buf20, buf22, buf23, buf25, buf26, buf27, reinterpret_tensor(buf28, (4, 1152), (1152, 1), 0), buf29, buf30, primals_23, primals_20, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 24, 24), (576, 576, 24, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, 32, 5, 5), (800, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((64, 32, 5, 5), (800, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((64, 64, 5, 5), (1600, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((128, 64, 5, 5), (1600, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((128, 128, 5, 5), (3200, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((2, 1152), (1152, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((4, 2), (2, 1), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 32 * x2 + 800 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 32 * x2 + 800 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 128 * x2 + 3200 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_5(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 128
xnumel = 576
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 576 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (y0 + 32 * x2 + 18432 * y1), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused__prelu_kernel_6(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp3 = tl.load(in_ptr1 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp5 = tmp4 * tmp0
tmp6 = tl.where(tmp2, tmp0, tmp5)
tl.store(out_ptr0 + x0, tmp6, None)
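# The kernel below fuses the convolution bias add with PReLU: it writes
# conv_out + bias back to in_out_ptr0 in place and stores
# PReLU(conv_out + bias) = x if x > 0 else a * x, with the single shared
# slope a loaded from in_ptr1, into out_ptr0.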
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_7(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp7 = tmp6 * tmp2
tmp8 = tl.where(tmp4, tmp2, tmp7)
tl.store(in_out_ptr0 + x2, tmp2, None)
tl.store(out_ptr0 + x2, tmp8, None)
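# The kernel below implements 2x2 / stride-2 max pooling over the channels-last
# layout produced above: it loads the four window elements (offsets 0, +32,
# +768, +800 = top-left, top-right, bottom-left, bottom-right), stores the max
# to out_ptr0 and the argmax position within the window (0..3) as int8 to out_ptr1.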
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32 % 12
x2 = xindex // 384
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1 + 1536 * x2), None)
tmp1 = tl.load(in_ptr0 + (32 + x0 + 64 * x1 + 1536 * x2), None)
tmp3 = tl.load(in_ptr0 + (768 + x0 + 64 * x1 + 1536 * x2), None)
tmp5 = tl.load(in_ptr0 + (800 + x0 + 64 * x1 + 1536 * x2), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_9(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp7 = tmp6 * tmp2
tmp8 = tl.where(tmp4, tmp2, tmp7)
tl.store(in_out_ptr0 + x2, tmp2, None)
tl.store(out_ptr0 + x2, tmp8, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 9216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = xindex // 64 % 6
x2 = xindex // 384
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 1536 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 1536 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (768 + x0 + 128 * x1 + 1536 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (832 + x0 + 128 * x1 + 1536 * x2), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_11(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp7 = tmp6 * tmp2
tmp8 = tl.where(tmp4, tmp2, tmp7)
tl.store(in_out_ptr0 + x2, tmp2, None)
tl.store(out_ptr0 + x2, tmp8, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 36
xnumel = 128
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 3
y1 = yindex // 3
y5 = yindex
y4 = yindex // 9
y6 = yindex % 9
tmp0 = tl.load(in_ptr0 + (x2 + 256 * y0 + 1536 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (128 + x2 + 256 * y0 + 1536 * y1), xmask &
ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (768 + x2 + 256 * y0 + 1536 * y1), xmask &
ymask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (896 + x2 + 256 * y0 + 1536 * y1), xmask &
ymask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1, 1], 1, tl.int8)
tmp4 = tl.full([1, 1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1, 1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1, 1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2 + 128 * y5), tmp15, xmask & ymask)
tl.store(out_ptr1 + (y6 + 9 * x2 + 1152 * y4), tmp16, xmask & ymask)
@triton.jit
def triton_poi_fused__prelu_kernel_13(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr1 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp5 = tmp4 * tmp0
tmp6 = tl.where(tmp2, tmp0, tmp5)
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24) = args
args.clear()
assert_size_stride(primals_1, (32, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 1, 24, 24), (576, 576, 24, 1))
assert_size_stride(primals_4, (1,), (1,))
assert_size_stride(primals_5, (32, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_6, (32,), (1,))
assert_size_stride(primals_7, (1,), (1,))
assert_size_stride(primals_8, (64, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (1,), (1,))
assert_size_stride(primals_11, (64, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_12, (64,), (1,))
assert_size_stride(primals_13, (1,), (1,))
assert_size_stride(primals_14, (128, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_15, (128,), (1,))
assert_size_stride(primals_16, (1,), (1,))
assert_size_stride(primals_17, (128, 128, 5, 5), (3200, 25, 5, 1))
assert_size_stride(primals_18, (128,), (1,))
assert_size_stride(primals_19, (1,), (1,))
assert_size_stride(primals_20, (2, 1152), (1152, 1))
assert_size_stride(primals_21, (2,), (1,))
assert_size_stride(primals_22, (1,), (1,))
assert_size_stride(primals_23, (4, 2), (2, 1))
assert_size_stride(primals_24, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((32, 32, 5, 5), (800, 1, 160, 32), torch.
float32)
get_raw_stream(0)
triton_poi_fused_0[grid(1024, 25)](primals_5, buf0, 1024, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_5
buf1 = empty_strided_cuda((64, 32, 5, 5), (800, 1, 160, 32), torch.
float32)
triton_poi_fused_1[grid(2048, 25)](primals_8, buf1, 2048, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_8
buf2 = empty_strided_cuda((64, 64, 5, 5), (1600, 1, 320, 64), torch
.float32)
triton_poi_fused_2[grid(4096, 25)](primals_11, buf2, 4096, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_11
buf3 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64),
torch.float32)
triton_poi_fused_3[grid(8192, 25)](primals_14, buf3, 8192, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_14
buf4 = empty_strided_cuda((128, 128, 5, 5), (3200, 1, 640, 128),
torch.float32)
triton_poi_fused_4[grid(16384, 25)](primals_17, buf4, 16384, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_17
buf5 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 32, 24, 24), (18432, 576, 24, 1))
buf6 = empty_strided_cuda((4, 32, 24, 24), (18432, 1, 768, 32),
torch.float32)
triton_poi_fused_convolution_5[grid(128, 576)](buf5, primals_2,
buf6, 128, 576, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_2
buf7 = reinterpret_tensor(buf5, (4, 32, 24, 24), (18432, 1, 768, 32), 0
)
del buf5
triton_poi_fused__prelu_kernel_6[grid(73728)](buf6, primals_4, buf7,
73728, XBLOCK=512, num_warps=8, num_stages=1)
buf8 = extern_kernels.convolution(buf7, buf0, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 32, 24, 24), (18432, 1, 768, 32))
buf9 = buf8
del buf8
buf10 = empty_strided_cuda((4, 32, 24, 24), (18432, 1, 768, 32),
torch.float32)
triton_poi_fused__prelu_kernel_convolution_7[grid(73728)](buf9,
primals_6, primals_7, buf10, 73728, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_6
buf11 = empty_strided_cuda((4, 32, 12, 12), (4608, 1, 384, 32),
torch.float32)
buf12 = empty_strided_cuda((4, 32, 12, 12), (4608, 1, 384, 32),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_8[grid(18432)](buf10,
buf11, buf12, 18432, XBLOCK=256, num_warps=4, num_stages=1)
buf13 = extern_kernels.convolution(buf11, buf1, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 64, 12, 12), (9216, 1, 768, 64))
buf14 = buf13
del buf13
buf15 = empty_strided_cuda((4, 64, 12, 12), (9216, 1, 768, 64),
torch.float32)
triton_poi_fused__prelu_kernel_convolution_9[grid(36864)](buf14,
primals_9, primals_10, buf15, 36864, XBLOCK=512, num_warps=4,
num_stages=1)
del primals_9
buf16 = extern_kernels.convolution(buf15, buf2, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 64, 12, 12), (9216, 1, 768, 64))
buf17 = buf16
del buf16
buf18 = empty_strided_cuda((4, 64, 12, 12), (9216, 1, 768, 64),
torch.float32)
triton_poi_fused__prelu_kernel_convolution_9[grid(36864)](buf17,
primals_12, primals_13, buf18, 36864, XBLOCK=512, num_warps=4,
num_stages=1)
del primals_12
buf19 = empty_strided_cuda((4, 64, 6, 6), (2304, 1, 384, 64), torch
.float32)
buf20 = empty_strided_cuda((4, 64, 6, 6), (2304, 1, 384, 64), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_10[grid(9216)](buf18,
buf19, buf20, 9216, XBLOCK=256, num_warps=4, num_stages=1)
buf21 = extern_kernels.convolution(buf19, buf3, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 128, 6, 6), (4608, 1, 768, 128))
buf22 = buf21
del buf21
buf23 = empty_strided_cuda((4, 128, 6, 6), (4608, 1, 768, 128),
torch.float32)
triton_poi_fused__prelu_kernel_convolution_11[grid(18432)](buf22,
primals_15, primals_16, buf23, 18432, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_15
buf24 = extern_kernels.convolution(buf23, buf4, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 128, 6, 6), (4608, 1, 768, 128))
buf25 = buf24
del buf24
buf26 = empty_strided_cuda((4, 128, 6, 6), (4608, 1, 768, 128),
torch.float32)
triton_poi_fused__prelu_kernel_convolution_11[grid(18432)](buf25,
primals_18, primals_19, buf26, 18432, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_18
buf27 = empty_strided_cuda((4, 128, 3, 3), (1152, 1, 384, 128),
torch.int8)
buf28 = empty_strided_cuda((4, 128, 3, 3), (1152, 9, 3, 1), torch.
float32)
triton_poi_fused_max_pool2d_with_indices_12[grid(36, 128)](buf26,
buf27, buf28, 36, 128, XBLOCK=4, YBLOCK=64, num_warps=4,
num_stages=1)
buf29 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_21, reinterpret_tensor(buf28, (4, 1152
), (1152, 1), 0), reinterpret_tensor(primals_20, (1152, 2), (1,
1152), 0), alpha=1, beta=1, out=buf29)
del primals_21
buf30 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
triton_poi_fused__prelu_kernel_13[grid(8)](buf29, primals_22, buf30,
8, XBLOCK=8, num_warps=1, num_stages=1)
buf31 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_24, buf30, reinterpret_tensor(
primals_23, (2, 4), (1, 2), 0), alpha=1, beta=1, out=buf31)
del primals_24
return (buf30, buf31, primals_1, primals_3, primals_4, buf0, primals_7,
buf1, primals_10, buf2, primals_13, buf3, primals_16, buf4,
primals_19, primals_22, buf6, buf7, buf9, buf10, buf11, buf12,
buf14, buf15, buf17, buf18, buf19, buf20, buf22, buf23, buf25,
buf26, buf27, reinterpret_tensor(buf28, (4, 1152), (1152, 1), 0),
buf29, buf30, primals_23, primals_20)
class ConvNetNew(nn.Module):
"""LeNet++ as described in the Center Loss paper."""
def __init__(self, num_classes):
super(ConvNetNew, self).__init__()
self.conv1_1 = nn.Conv2d(1, 32, 5, stride=1, padding=2)
self.prelu1_1 = nn.PReLU()
self.conv1_2 = nn.Conv2d(32, 32, 5, stride=1, padding=2)
self.prelu1_2 = nn.PReLU()
self.conv2_1 = nn.Conv2d(32, 64, 5, stride=1, padding=2)
self.prelu2_1 = nn.PReLU()
self.conv2_2 = nn.Conv2d(64, 64, 5, stride=1, padding=2)
self.prelu2_2 = nn.PReLU()
self.conv3_1 = nn.Conv2d(64, 128, 5, stride=1, padding=2)
self.prelu3_1 = nn.PReLU()
self.conv3_2 = nn.Conv2d(128, 128, 5, stride=1, padding=2)
self.prelu3_2 = nn.PReLU()
self.fc1 = nn.Linear(128 * 3 * 3, 2)
self.prelu_fc1 = nn.PReLU()
self.fc2 = nn.Linear(2, num_classes)
def forward(self, input_0):
primals_1 = self.conv1_1.weight
primals_2 = self.conv1_1.bias
primals_4 = self.prelu1_1.weight
primals_5 = self.conv1_2.weight
primals_6 = self.conv1_2.bias
primals_7 = self.prelu1_2.weight
primals_8 = self.conv2_1.weight
primals_9 = self.conv2_1.bias
primals_10 = self.prelu2_1.weight
primals_11 = self.conv2_2.weight
primals_12 = self.conv2_2.bias
primals_13 = self.prelu2_2.weight
primals_14 = self.conv3_1.weight
primals_15 = self.conv3_1.bias
primals_16 = self.prelu3_1.weight
primals_17 = self.conv3_2.weight
primals_18 = self.conv3_2.bias
primals_19 = self.prelu3_2.weight
primals_20 = self.fc1.weight
primals_21 = self.fc1.bias
primals_22 = self.prelu_fc1.weight
primals_23 = self.fc2.weight
primals_24 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24])
return output[0], output[1]
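# Usage sketch, not part of the generated code: it assumes a CUDA device and the
# batch of four 1x24x24 inputs fixed by the asserts in call(); num_classes=4
# mirrors the (4, 2) fc2 weight asserted above.
def _example_convnet_forward():
    model = ConvNetNew(num_classes=4).cuda()
    x = torch.randn(4, 1, 24, 24, device='cuda')
    features, logits = model(x)  # (2-d embedding, class logits)
    return features.shape, logits.shape  # (4, 2), (4, 4)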
|
SJHBXShub/Center_Loss
|
ConvNet
| false
| 14,402
|
[
"MIT"
] | 813
|
4097709144cf4cfc04d91ac1462ebf346b9f0448
|
https://github.com/SJHBXShub/Center_Loss/tree/4097709144cf4cfc04d91ac1462ebf346b9f0448
|
EntMinLoss
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class EntMinLoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, f_x):
soft_f_x = F.softmax(f_x, dim=-1)
log_soft_f_x = F.log_softmax(f_x, dim=-1)
ent = -torch.sum(soft_f_x * log_soft_f_x) / f_x.shape[0]
return ent
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax__softmax_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
tl.store(out_ptr1 + x2, tmp8, xmask)
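# The reduction kernel below finishes the entropy: it normalizes the exp values
# in in_ptr0 into softmax probabilities (divide by the per-row sum), converts the
# max-shifted logits in in_ptr1 into log-softmax (subtract the log-sum-exp),
# accumulates sum(p * log p) over all 256 elements, and finally negates and
# scales by 0.25 (= 1 / batch size 4), matching EntMinLoss.forward.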
@triton.jit
def triton_red_fused__log_softmax__softmax_div_mul_neg_sum_1(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.
constexpr):
rnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
_tmp25 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
r1 = rindex // 4
tmp0 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp1 = tl.load(in_ptr0 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp2 = tl.load(in_ptr0 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp4 = tl.load(in_ptr0 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp9 = tl.load(in_ptr1 + r2, rmask, eviction_policy='evict_first',
other=0.0)
tmp10 = tl.load(in_ptr1 + 4 * r1, rmask, eviction_policy=
'evict_last', other=0.0)
tmp12 = tl.load(in_ptr1 + (1 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tl.load(in_ptr1 + (2 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp18 = tl.load(in_ptr1 + (3 + 4 * r1), rmask, eviction_policy=
'evict_last', other=0.0)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp11 = tl_math.exp(tmp10)
tmp13 = tl_math.exp(tmp12)
tmp14 = tmp11 + tmp13
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp14 + tmp16
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tl_math.log(tmp20)
tmp22 = tmp9 - tmp21
tmp23 = tmp8 * tmp22
tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK])
tmp26 = _tmp25 + tmp24
_tmp25 = tl.where(rmask, tmp26, _tmp25)
tmp25 = tl.sum(_tmp25, 1)[:, None]
tmp27 = -tmp25
tmp28 = 0.25
tmp29 = tmp27 * tmp28
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp29, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax__softmax_0[grid(256)](arg0_1, buf0,
buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_red_fused__log_softmax__softmax_div_mul_neg_sum_1[grid(1)](buf4,
buf0, buf1, 1, 256, XBLOCK=1, RBLOCK=256, num_warps=8, num_stages=1
)
del buf0
del buf1
return buf4,
class EntMinLossNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
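# Sanity-check sketch, not part of the generated code: it assumes a CUDA device
# and the fixed (4, 4, 4, 4) input shape asserted in call(), and compares the
# compiled loss against the eager formula -sum(softmax * log_softmax) / batch size.
def _example_entmin_check():
    import torch.nn.functional as F
    x = torch.rand(4, 4, 4, 4, device='cuda')
    compiled = EntMinLossNew()(x)
    eager = -(F.softmax(x, dim=-1) * F.log_softmax(x, dim=-1)).sum() / x.shape[0]
    return torch.allclose(compiled, eager, atol=1e-5)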
|
leoandeol/ldir
|
EntMinLoss
| false
| 3,887
|
[
"MIT"
] | 0
|
f90408c5fb16a52c6c5a76fff1c46b9062343ad5
|
https://github.com/leoandeol/ldir/tree/f90408c5fb16a52c6c5a76fff1c46b9062343ad5
|
UNetModule
|
import torch
import torch.nn as nn
import torch.backends.cudnn
import torch.utils.data
def conv3x3(in_, out):
return nn.Conv2d(in_, out, 3, padding=1)
class Conv3BN(nn.Module):
def __init__(self, in_: 'int', out: 'int', bn=False):
super().__init__()
self.conv = conv3x3(in_, out)
self.bn = nn.BatchNorm2d(out) if bn else None
self.activation = nn.SELU(inplace=True)
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
x = self.activation(x)
return x
class UNetModule(nn.Module):
def __init__(self, in_: 'int', out: 'int'):
super().__init__()
self.l1 = Conv3BN(in_, out)
self.l2 = Conv3BN(out, out)
def forward(self, x):
x = self.l1(x)
x = self.l2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_': 4, 'out': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.backends.cudnn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
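# The kernel below fuses the convolution bias add with SELU (Conv3BN's
# activation): for x = conv_out + bias it stores scale * x when x > 0 and
# scale * alpha * expm1(x) otherwise, with scale ~= 1.0507 and
# scale * alpha ~= 1.7581, updating in_out_ptr0 in place.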
@triton.jit
def triton_poi_fused_convolution_elu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 1.0507009873554805
tmp6 = tmp2 * tmp5
tmp7 = 1.0
tmp8 = tmp2 * tmp7
tmp9 = libdevice.expm1(tmp8)
tmp10 = 1.7580993408473766
tmp11 = tmp9 * tmp10
tmp12 = tl.where(tmp4, tmp6, tmp11)
tl.store(in_out_ptr0 + x3, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_elu_0[grid(256)](buf1, primals_2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_elu_0[grid(256)](buf3, primals_5, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
return buf3, primals_1, primals_3, primals_4, buf1, buf3
def conv3x3(in_, out):
return nn.Conv2d(in_, out, 3, padding=1)
class Conv3BN(nn.Module):
def __init__(self, in_: 'int', out: 'int', bn=False):
super().__init__()
self.conv = conv3x3(in_, out)
self.bn = nn.BatchNorm2d(out) if bn else None
self.activation = nn.SELU(inplace=True)
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
x = self.activation(x)
return x
class UNetModuleNew(nn.Module):
def __init__(self, in_: 'int', out: 'int'):
super().__init__()
self.l1 = Conv3BN(in_, out)
self.l2 = Conv3BN(out, out)
def forward(self, input_0):
primals_1 = self.l1.conv.weight
primals_2 = self.l1.conv.bias
primals_4 = self.l2.conv.weight
primals_5 = self.l2.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
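# Usage sketch, not part of the generated code: a minimal example assuming a
# CUDA device and the 4-channel 4x4 input shape fixed by the asserts in call().
def _example_unet_module():
    m = UNetModuleNew(in_=4, out=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = m(x)  # two fused conv + SELU blocks
    return y.shape  # (4, 4, 4, 4)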
|
ArmenGhambaryan/kaggle_carvana_segmentation
|
UNetModule
| false
| 13,296
|
[
"MIT"
] | 447
|
648a6b5c807cb69011316fe6501241dacc027db2
|
https://github.com/ArmenGhambaryan/kaggle_carvana_segmentation/tree/648a6b5c807cb69011316fe6501241dacc027db2
|
OffsetNet
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/3u/c3u6zmp5plplv5tjyzlxet5sgcucpeizysbhi7xphxjhdc6kmodq.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1], [1], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/5b/c5br3r4gpi7zzaygqfdgcqeerwiekt2d2t2wkw4sj54lam6radgq.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_2 => relu
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_5), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/qs/cqsk6a76egdyzr4pbwvfkopx7r76mm6pjq7rxndgddfyegzfpbgy.py
# Topologically Sorted Source Nodes: [sigmoid, sub, x_5], Original ATen: [aten.sigmoid, aten.sub, aten.mul]
# Source node to ATen node mapping:
# sigmoid => sigmoid
# sub => sub
# x_5 => mul
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sigmoid, 0.5), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 4), kwargs = {})
triton_poi_fused_mul_sigmoid_sub_2 = async_compile.triton('triton_poi_fused_mul_sigmoid_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_sub_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 0.5
tmp3 = tmp1 - tmp2
tmp4 = 4.0
tmp5 = tmp3 * tmp4
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 4, 3), (12, 3, 1))
assert_size_stride(primals_3, (1, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4), (4, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_3, 16, grid=grid(16), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf3, primals_5, 16, grid=grid(16), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_7
buf6 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid, sub, x_5], Original ATen: [aten.sigmoid, aten.sub, aten.mul]
triton_poi_fused_mul_sigmoid_sub_2.run(buf5, buf6, 4, grid=grid(4), stream=stream0)
return (buf6, primals_1, primals_2, reinterpret_tensor(buf1, (4, 4), (4, 1), 0), buf3, buf5, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4, 3), (12, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_sub_2(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 0.5
tmp3 = tmp1 - tmp2
tmp4 = 4.0
tmp5 = tmp3 * tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 4, 3), (12, 3, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,),
padding=(1,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4), (4, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16)](buf1, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (4, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_relu_1[grid(16)](buf3, primals_5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
(4, 1), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_7
buf6 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
triton_poi_fused_mul_sigmoid_sub_2[grid(4)](buf5, buf6, 4, XBLOCK=4,
num_warps=1, num_stages=1)
return buf6, primals_1, primals_2, reinterpret_tensor(buf1, (4, 4), (4,
1), 0), buf3, buf5, primals_6, primals_4
class OffsetNetNew(nn.Module):
"""OffsetNet in Temporal interlace module.
The OffsetNet consists of one convolution layer and two fc layers
with a relu activation following with a sigmoid function. Following
the convolution layer, two fc layers and relu are applied to the output.
Then, apply the sigmoid function with a multiply factor and a minus 0.5
to transform the output to (-4, 4).
Args:
in_channels (int): Channel num of input features.
groups (int): Number of groups for fc layer outputs.
num_segments (int): Number of frame segments.
"""
def __init__(self, in_channels, groups, num_segments):
super().__init__()
self.sigmoid = nn.Sigmoid()
kernel_size = 3
padding = 1
self.conv = nn.Conv1d(in_channels, 1, kernel_size, padding=padding)
self.fc1 = nn.Linear(num_segments, num_segments)
self.relu = nn.ReLU()
self.fc2 = nn.Linear(num_segments, groups)
self.init_weights()
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
self.fc2.bias.data[...] = 0.5108
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_4 = self.fc1.weight
primals_5 = self.fc1.bias
primals_6 = self.fc2.weight
primals_7 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
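# Usage sketch, not part of the generated code: it assumes a CUDA device and the
# sizes fixed by the asserts in call() (in_channels=4, groups=1, num_segments=4,
# batch of 4). The last fused kernel yields offsets in (-2, 2).
def _example_offsetnet():
    net = OffsetNetNew(in_channels=4, groups=1, num_segments=4).cuda()
    x = torch.rand(4, 4, 4, device='cuda')  # (batch, in_channels, num_segments)
    offsets = net(x)
    return offsets.shape  # (4, 1, 1)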
|
HypnosXC/mmaction2
|
OffsetNet
| false
| 13,827
|
[
"Apache-2.0"
] | 549
|
a26d5f981449445a5e22a0a60d8b285e06c3dd6e
|
https://github.com/HypnosXC/mmaction2/tree/a26d5f981449445a5e22a0a60d8b285e06c3dd6e
|
BinaryExpAbs
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/56/c56vvxvdetpcy56d3maotjwmokjqe4xycu455zjdunrtp4yae7sm.py
# Topologically Sorted Source Nodes: [neg, sub, abs_1, mul, exp], Original ATen: [aten.neg, aten.sub, aten.abs, aten.mul, aten.exp]
# Source node to ATen node mapping:
# abs_1 => abs_1
# exp => exp
# mul => mul
# neg => neg
# sub => sub
# Graph fragment:
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%primals_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select, %select_1), kwargs = {})
# %abs_1 : [num_users=2] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %abs_1), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
triton_poi_fused_abs_exp_mul_neg_sub_0 = async_compile.triton('triton_poi_fused_abs_exp_mul_neg_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_exp_mul_neg_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_abs_exp_mul_neg_sub_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp6 = -tmp5
tmp7 = tmp6 * tmp3
tmp8 = tl_math.exp(tmp7)
tl.store(out_ptr0 + (x0), tmp3, xmask)
tl.store(out_ptr1 + (x0), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [neg, sub, abs_1, mul, exp], Original ATen: [aten.neg, aten.sub, aten.abs, aten.mul, aten.exp]
stream0 = get_raw_stream(0)
triton_poi_fused_abs_exp_mul_neg_sub_0.run(primals_2, primals_1, buf0, buf1, 64, grid=grid(64), stream=stream0)
del primals_1
del primals_2
return (buf1, buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((), (), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import abc
import inspect
import warnings
from pathlib import Path
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import Any
from typing import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_abs_exp_mul_neg_sub_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
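    # Descriptive note (mirrors the fused neg/sub/abs/mul/exp graph in the AOT
    # block above): for each of the 64 elements x0 this computes
    # out_ptr0[x0] = |in_ptr0[x0] - in_ptr0[64 + x0]| and
    # out_ptr1[x0] = exp(-beta * out_ptr0[x0]), where beta is the scalar in in_ptr1.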
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp6 = -tmp5
tmp7 = tmp6 * tmp3
tmp8 = tl_math.exp(tmp7)
tl.store(out_ptr0 + x0, tmp3, xmask)
tl.store(out_ptr1 + x0, tmp8, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_abs_exp_mul_neg_sub_0[grid(64)](primals_2,
primals_1, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_1
del primals_2
return buf1, buf0, buf1
def get_module_name(cls_or_func):
module_name = cls_or_func.__module__
if module_name == '__main__':
for frm in inspect.stack():
if inspect.getmodule(frm[0]).__name__ == '__main__':
main_file_path = Path(inspect.getsourcefile(frm[0]))
if not Path().samefile(main_file_path.parent):
raise RuntimeError(
f'You are using "{main_file_path}" to launch your experiment, please launch the experiment under the directory where "{main_file_path.name}" is located.'
)
module_name = main_file_path.stem
break
if module_name == '__main__':
warnings.warn(
                'Callstack exhausted but main module still not found. This will probably cause issues, as the function/class cannot be imported.'
)
if (f'{cls_or_func.__module__}.{cls_or_func.__name__}' ==
'torch.nn.modules.rnn.LSTM'):
module_name = cls_or_func.__module__
return module_name
_last_uid = {}  # minimal definition assumed here so that reset_uid below is self-contained
def reset_uid(namespace: 'str'='default') ->None:
    _last_uid[namespace] = 0
def _create_wrapper_cls(cls, store_init_parameters=True, reset_mutation_uid
=False, stop_parsing=True):
class wrapper(cls):
def __init__(self, *args, **kwargs):
self._stop_parsing = stop_parsing
if reset_mutation_uid:
reset_uid('mutation')
if store_init_parameters:
argname_list = list(inspect.signature(cls.__init__).
parameters.keys())[1:]
full_args = {}
full_args.update(kwargs)
assert len(args) <= len(argname_list
), f'Length of {args} is greater than length of {argname_list}.'
for argname, value in zip(argname_list, args):
full_args[argname] = value
args = list(args)
for i, value in enumerate(args):
if isinstance(value, Translatable):
args[i] = value._translate()
for i, value in kwargs.items():
if isinstance(value, Translatable):
kwargs[i] = value._translate()
self._init_parameters = full_args
else:
self._init_parameters = {}
super().__init__(*args, **kwargs)
wrapper.__module__ = get_module_name(cls)
wrapper.__name__ = cls.__name__
wrapper.__qualname__ = cls.__qualname__
wrapper.__init__.__doc__ = cls.__init__.__doc__
return wrapper
def serialize_cls(cls):
"""
    To create a serializable class.
"""
return _create_wrapper_cls(cls)
def basic_unit(cls):
"""
    To wrap a module as a basic unit: stop the graph parser from descending into it and make it mutable.
"""
import torch.nn as nn
assert issubclass(cls, nn.Module
), 'When using @basic_unit, the class must be a subclass of nn.Module.'
return serialize_cls(cls)
class Translatable(abc.ABC):
"""
    Inherit this class and implement ``_translate`` when the inner class needs different
    parameters from the wrapper class in its init function.
"""
@abc.abstractmethod
def _translate(self) ->Any:
pass
@basic_unit
class BinaryExpAbsNew(nn.Module):
def __init__(self):
super().__init__()
self.beta = torch.nn.Parameter(torch.tensor(1, dtype=torch.float32))
def forward(self, input_0):
primals_1 = self.beta
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
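# Illustrative usage sketch (an assumption, not part of the generated file): the
# input shape and CUDA device below are taken from the assert_size_stride guards
# in `call`; the result is exp(-beta * |x[0] - x[1]|) with shape (4, 4, 4).
if __name__ == "__main__":
    if torch.cuda.is_available():
        module = BinaryExpAbsNew().cuda()
        result = module(torch.rand(4, 4, 4, 4, device="cuda"))
        print(result.shape)  # torch.Size([4, 4, 4])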
|
Johnsonms/NNI_master
|
BinaryExpAbs
| false
| 11,563
|
[
"MIT"
] | 0
|
e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
https://github.com/Johnsonms/NNI_master/tree/e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
ScaledL2Norm
|
import torch
import torch.onnx
import torch
import torch.nn as nn
import torch.nn.functional as F
class ScaledL2Norm(nn.Module):
def __init__(self, in_channels, initial_scale):
super(ScaledL2Norm, self).__init__()
self.in_channels = in_channels
self.scale = nn.Parameter(torch.Tensor(in_channels))
self.initial_scale = initial_scale
self.reset_parameters()
def forward(self, x):
return F.normalize(x, p=2, dim=1) * self.scale.unsqueeze(0).unsqueeze(2
).unsqueeze(3)
def reset_parameters(self):
self.scale.data.fill_(self.initial_scale)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'initial_scale': 1.0}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.onnx
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
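    # Descriptive note: each element is divided by the L2 norm over the channel
    # dimension (the four loads at offsets 0/16/32/48 from x0 + 64 * x2), clamped
    # below at 1e-12, then multiplied by the per-channel scale in_ptr1[x1].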
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tmp17 = tmp15 * tmp16
tl.store(out_ptr0 + x3, tmp17, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_mul_0[grid(256)](primals_1, primals_2, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
return buf0, primals_1
class ScaledL2NormNew(nn.Module):
def __init__(self, in_channels, initial_scale):
super(ScaledL2NormNew, self).__init__()
self.in_channels = in_channels
self.scale = nn.Parameter(torch.Tensor(in_channels))
self.initial_scale = initial_scale
self.reset_parameters()
def reset_parameters(self):
self.scale.data.fill_(self.initial_scale)
def forward(self, input_0):
primals_2 = self.scale
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
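# Illustrative usage sketch (an assumption, not part of the generated file): the
# shapes below follow the assert_size_stride guards in `call`; the output equals
# the original module's F.normalize(x, p=2, dim=1) * scale.
if __name__ == "__main__":
    if torch.cuda.is_available():
        norm = ScaledL2NormNew(in_channels=4, initial_scale=1.0).cuda()
        out = norm(torch.rand(4, 4, 4, 4, device="cuda"))
        print(out.shape)  # torch.Size([4, 4, 4, 4])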
|
mirecta/pytorch-ssd
|
ScaledL2Norm
| false
| 12,784
|
[
"MIT"
] | 0
|
360f31bfff12f2954c9166dc78df038334a01c53
|
https://github.com/mirecta/pytorch-ssd/tree/360f31bfff12f2954c9166dc78df038334a01c53
|
SqueezeAndExcitationModule
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/vm/cvmsgl3j6cbrs4yvta6w5vjhmu7huh75tkw6spu64gpnevd5ytcj.py
# Topologically Sorted Source Nodes: [scale], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# scale => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1], True), kwargs = {})
triton_poi_fused_mean_0 = async_compile.triton('triton_poi_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/p5/cp5us67vtvgvci2fdnd64iojdyp4orzux4324j6czewko4qz35ue.py
# Topologically Sorted Source Nodes: [scale_2], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# scale_2 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%squeeze,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
tmp0 = tl.load(in_out_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr0 + (0))
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = 0.0
tmp8 = tmp6 <= tmp7
tl.store(in_out_ptr0 + (tl.full([XBLOCK], 0, tl.int32)), tmp6, None)
tl.store(out_ptr0 + (tl.full([XBLOCK], 0, tl.int32)), tmp8, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/az/cazousalzuqn73ciahz5izvogzu4ekcsktal4tthjvwjd3cqdayz.py
# Topologically Sorted Source Nodes: [scale_3], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# scale_3 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze_1, %primals_4, %primals_5, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/fs/cfs4qiqhjuq4rjjmrtzf2cjre423zu5vqtebwr5o6wc6rv52ml7a.py
# Topologically Sorted Source Nodes: [scale_4, x], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# scale_4 => sigmoid
# x => mul
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%squeeze_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sigmoid), kwargs = {})
triton_poi_fused_mul_sigmoid_3 = async_compile.triton('triton_poi_fused_mul_sigmoid_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (1, ), (1, ))
assert_size_stride(primals_4, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [scale], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_poi_fused_mean_0.run(primals_1, buf0, 4, grid=grid(4), stream=stream0)
# Topologically Sorted Source Nodes: [scale_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (1, 4, 1), (0, 1, 0), 0), primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (1, 1, 1), (1, 1, 1))
buf2 = reinterpret_tensor(buf1, (1, 1), (1, 1), 0); del buf1 # reuse
buf6 = empty_strided_cuda((1, 1), (1, 1), torch.bool)
# Topologically Sorted Source Nodes: [scale_2], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf2, primals_3, buf6, 1, grid=grid(1), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [scale_3], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(reinterpret_tensor(buf2, (1, 1, 1), (0, 0, 0), 0), primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf3, (1, 4, 1), (4, 1, 1))
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [scale_3], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf4, primals_5, 4, grid=grid(4), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [scale_4, x], Original ATen: [aten.sigmoid, aten.mul]
triton_poi_fused_mul_sigmoid_3.run(primals_1, buf4, buf5, 16, grid=grid(16), stream=stream0)
return (buf5, primals_1, primals_2, primals_4, reinterpret_tensor(buf0, (1, 4, 1), (4, 1, 1), 0), reinterpret_tensor(buf2, (1, 1, 1), (1, 1, 1), 0), buf4, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
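    # Squeeze step: mean over the last dimension (four contiguous values per row
    # of the (4, 4) input), producing one value per row.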
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
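    # Bottleneck activation: adds the conv1 bias to the single squeezed value,
    # applies ReLU, and records (activation <= 0) as the boolean mask reused by
    # the threshold backward pass.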
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
tmp0 = tl.load(in_out_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr0 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = 0.0
tmp8 = tmp6 <= tmp7
tl.store(in_out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp6, None)
tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp8, None)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
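    # Adds the conv2 bias in place to the four excitation logits.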
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
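    # Excitation step: gates each input element with sigmoid(logit), broadcasting
    # one logit per row (in_ptr1[xindex // 4]) over the row's four elements.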
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(4)](primals_1, buf0, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (1, 4, 1
), (0, 1, 0), 0), primals_2, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf1, (1, 1, 1), (1, 1, 1))
buf2 = reinterpret_tensor(buf1, (1, 1), (1, 1), 0)
del buf1
buf6 = empty_strided_cuda((1, 1), (1, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(1)](buf2, primals_3,
buf6, 1, XBLOCK=1, num_warps=1, num_stages=1)
del primals_3
buf3 = extern_kernels.convolution(reinterpret_tensor(buf2, (1, 1, 1
), (0, 0, 0), 0), primals_4, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf3, (1, 4, 1), (4, 1, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_2[grid(4)](buf4, primals_5, 4, XBLOCK=
4, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_3[grid(16)](primals_1, buf4, buf5, 16,
XBLOCK=16, num_warps=1, num_stages=1)
return buf5, primals_1, primals_2, primals_4, reinterpret_tensor(buf0,
(1, 4, 1), (4, 1, 1), 0), reinterpret_tensor(buf2, (1, 1, 1), (1, 1,
1), 0), buf4, buf6
class Swish(nn.Module):
def __init__(self):
super(Swish, self).__init__()
def forward(self, x):
return x * x.sigmoid()
class Conv1d(nn.Conv1d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding='same', dilation=1, groups=1, bias=True):
super(Conv1d, self).__init__(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, stride=stride, padding=0,
dilation=dilation, groups=groups, bias=bias, padding_mode='zeros')
assert padding in ['valid', 'same', 'causal']
if padding == 'valid':
self.pre_padding = None
elif padding == 'same':
self.pre_padding = nn.ConstantPad1d(padding=((kernel_size - 1) //
2, (kernel_size - 1) // 2), value=0)
elif padding == 'causal':
self.pre_padding = nn.ConstantPad1d(padding=(kernel_size - 1, 0
), value=0)
self.noise = None
self.vn_std = None
def init_vn(self, vn_std):
self.vn_std = vn_std
def sample_synaptic_noise(self, distributed):
self.noise = torch.normal(mean=0.0, std=1.0, size=self.weight.size(
), device=self.weight.device, dtype=self.weight.dtype)
if distributed:
torch.distributed.broadcast(self.noise, 0)
def forward(self, input):
weight = self.weight
if self.noise is not None and self.training:
weight = weight + self.vn_std * self.noise
if self.pre_padding is not None:
input = self.pre_padding(input)
return F.conv1d(input, weight, self.bias, self.stride, self.padding,
self.dilation, self.groups)
class SqueezeAndExcitationModuleNew(nn.Module):
"""Squeeze And Excitation Module
Args:
input_dim: input feature dimension
reduction_ratio: bottleneck reduction ratio
inner_act: bottleneck inner activation function
Input: (batch_size, in_dim, in_length)
Output: (batch_size, out_dim, out_length)
"""
def __init__(self, input_dim, reduction_ratio, inner_act='relu'):
super(SqueezeAndExcitationModuleNew, self).__init__()
assert input_dim % reduction_ratio == 0
self.conv1 = Conv1d(input_dim, input_dim // reduction_ratio,
kernel_size=1)
self.conv2 = Conv1d(input_dim // reduction_ratio, input_dim,
kernel_size=1)
assert inner_act in ['relu', 'swish']
if inner_act == 'relu':
self.inner_act = nn.ReLU()
elif inner_act == 'swish':
self.inner_act = Swish()
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
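# Illustrative usage sketch (an assumption, not part of the generated file): the
# compiled `call` asserts a 2-D (4, 4) input (the shape this graph was traced
# with), even though the original module documents (batch, dim, length) inputs.
if __name__ == "__main__":
    if torch.cuda.is_available():
        se = SqueezeAndExcitationModuleNew(input_dim=4, reduction_ratio=4).cuda()
        gated = se(torch.rand(4, 4, device="cuda"))
        print(gated.shape)  # torch.Size([4, 4])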
|
gheyret/EfficientConformer
|
SqueezeAndExcitationModule
| false
| 15,436
|
[
"Apache-2.0"
] | 101
|
b28a0aaa3b182f72abaccbeb12df0402adf96097
|
https://github.com/gheyret/EfficientConformer/tree/b28a0aaa3b182f72abaccbeb12df0402adf96097
|
ResidualBlock
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/v6/cv6oewqqnsshd7he7ylh2kikzu4smtrhj2dmv6nb5csosp7g6vw5.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d]
# Source node to ATen node mapping:
# out => _unsafe_index, _unsafe_index_1
# Graph fragment:
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6) % 6
x2 = (xindex // 36)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/t6/ct6syu6rq3n7yx3zuog2yujcrfreefdccraqz7zj2m3c5xhvp5vl.py
# Topologically Sorted Source Nodes: [out_1, instance_norm], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# instance_norm => add, rsqrt, var_mean
# out_1 => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_per_fused__native_batch_norm_legit_convolution_1 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_1', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_1(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + (16*x3)), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(in_out_ptr0 + (r2 + (16*x3)), tmp2, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x3), tmp23, xmask)
tl.store(out_ptr0 + (x3), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/3g/c3gbhm3y6wldudvsxdmmjh5ssg2uys5qqk3dd3k7bxnuot4xhndp.py
# Topologically Sorted Source Nodes: [instance_norm], Original ATen: [aten.repeat]
# Source node to ATen node mapping:
# instance_norm => repeat
# Graph fragment:
# %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_4, [4]), kwargs = {})
triton_poi_fused_repeat_2 = async_compile.triton('triton_poi_fused_repeat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0 % 4), xmask)
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/6x/c6xlvvnj6ftmp7jka4547n3hpffcz5xr3op3wtbpv5povsb6rjue.py
# Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.relu, aten.reflection_pad2d]
# Source node to ATen node mapping:
# out_2 => relu
# out_3 => _unsafe_index_2, _unsafe_index_3
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_3 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_reflection_pad2d_relu_3 = async_compile.triton('triton_poi_fused_reflection_pad2d_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_relu_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6) % 6
x2 = (xindex // 36)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x2), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/f6/cf6z47jjg5avrtu2dn7ifr4mx5pcq57zzoksapbtrbrglr3gqnxi.py
# Topologically Sorted Source Nodes: [out_4, out_5, out_6], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.add]
# Source node to ATen node mapping:
# out_4 => convolution_1
# out_5 => add_2, repeat_2, rsqrt_1, var_mean_1
# out_6 => add_4
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_3, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %repeat_2 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_8, [4]), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_1), kwargs = {})
triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4 = async_compile.triton('triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex
r3 = rindex
x1 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x0 % 4), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_out_ptr0 + (r3 + (16*x0)), xmask, other=0.0)
tmp2 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr3 + (r3 + (16*x0)), xmask, other=0.0)
tmp3 = tmp1 + tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp10 / tmp12
tmp14 = tmp4 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(xmask, tmp16, 0)
tmp19 = tl.sum(tmp18, 1)[:, None]
tmp20 = tmp3 - tmp13
tmp21 = 16.0
tmp22 = tmp19 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tmp26 = tmp20 * tmp25
tmp27 = tmp26 * tmp0
tmp29 = tmp27 + tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + (x0), tmp0, xmask)
tl.store(in_out_ptr0 + (r3 + (16*x0)), tmp3, xmask)
tl.store(out_ptr3 + (r3 + (16*x0)), tmp31, xmask)
tl.store(out_ptr4 + (x0), tmp25, xmask)
tl.store(out_ptr1 + (x0), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d]
stream0 = get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 576, grid=grid(576), stream=stream0)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf6 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf8 = reinterpret_tensor(buf6, (1, 16, 1, 1), (16, 1, 1, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [out_1, instance_norm], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_convolution_1.run(buf2, buf8, primals_3, buf5, 16, 16, grid=grid(16), stream=stream0)
del primals_3
buf3 = empty_strided_cuda((16, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [instance_norm], Original ATen: [aten.repeat]
triton_poi_fused_repeat_2.run(primals_4, buf3, 16, grid=grid(16), stream=stream0)
del primals_4
buf4 = empty_strided_cuda((16, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [instance_norm], Original ATen: [aten.repeat]
triton_poi_fused_repeat_2.run(primals_5, buf4, 16, grid=grid(16), stream=stream0)
del primals_5
buf9 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.relu, aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_relu_3.run(buf2, buf5, buf8, buf3, buf4, buf9, 576, grid=grid(576), stream=stream0)
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 4, 4, 4), (64, 16, 4, 1))
buf12 = empty_strided_cuda((16, ), (1, ), torch.float32)
buf11 = buf10; del buf10 # reuse
buf13 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf16 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [out_4, out_5, out_6], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.add]
triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4.run(buf11, primals_8, primals_7, primals_9, primals_1, buf12, buf13, buf17, buf16, 16, 16, grid=grid(16), stream=stream0)
del primals_1
del primals_7
del primals_8
del primals_9
return (buf17, primals_2, primals_6, buf0, buf2, buf3, buf4, buf5, buf8, buf9, buf11, buf12, reinterpret_tensor(buf16, (16, ), (1, ), 0), reinterpret_tensor(buf13, (1, 16, 1, 1), (16, 1, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
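    # Reflection-pads each 4x4 feature map to 6x6 (padding of 1 on every side);
    # the nested tl_math.abs terms mirror out-of-range coordinates back into [0, 3].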
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_1(in_out_ptr0,
in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
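    # Adds the first conv bias in place, then computes the per-(sample, channel)
    # mean (out_ptr0) and rsqrt(var + 1e-05) (in_out_ptr1) over the 16 spatial
    # positions: the InstanceNorm statistics consumed by the next kernel.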
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp23, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
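    # Tiles the 4-element affine parameter across the batch: output index x0
    # reads in_ptr0[x0 % 4], giving 16 values (4 instances x 4 channels).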
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0 % 4, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_reflection_pad2d_relu_3(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
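    # Applies the InstanceNorm affine transform ((x - mean) * rsqrt * weight + bias),
    # then ReLU, then reflection padding to 6x6, using the statistics and repeated
    # parameters produced by the previous kernels.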
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + x3, tmp10, xmask)
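# Kernel below: adds the conv2 bias, computes instance-norm statistics, applies
# the affine parameters, and adds the residual input, all in one reduction pass.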
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1,
out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex
r3 = rindex
x1 = xindex % 4
tmp0 = tl.load(in_ptr0 + x0 % 4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_out_ptr0 + (r3 + 16 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr3 + (r3 + 16 * x0), xmask, other=0.0)
tmp3 = tmp1 + tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tl.where(xmask, tmp4, 0)
tmp7 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp10 / tmp12
tmp14 = tmp4 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(xmask, tmp16, 0)
tmp19 = tl.sum(tmp18, 1)[:, None]
tmp20 = tmp3 - tmp13
tmp21 = 16.0
tmp22 = tmp19 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tmp26 = tmp20 * tmp25
tmp27 = tmp26 * tmp0
tmp29 = tmp27 + tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x0, tmp0, xmask)
tl.store(in_out_ptr0 + (r3 + 16 * x0), tmp3, xmask)
tl.store(out_ptr3 + (r3 + 16 * x0), tmp31, xmask)
tl.store(out_ptr4 + x0, tmp25, xmask)
tl.store(out_ptr1 + x0, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(576)](primals_1, buf0, 576,
XBLOCK=128, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
        buf6 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf8 = reinterpret_tensor(buf6, (1, 16, 1, 1), (16, 1, 1, 1), 0)
del buf6
triton_per_fused__native_batch_norm_legit_convolution_1[grid(16)](buf2,
buf8, primals_3, buf5, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((16,), (1,), torch.float32)
triton_poi_fused_repeat_2[grid(16)](primals_4, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_4
buf4 = empty_strided_cuda((16,), (1,), torch.float32)
triton_poi_fused_repeat_2[grid(16)](primals_5, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf9 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
triton_poi_fused_reflection_pad2d_relu_3[grid(576)](buf2, buf5,
buf8, buf3, buf4, buf9, 576, XBLOCK=256, num_warps=4, num_stages=1)
buf10 = extern_kernels.convolution(buf9, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 4, 4, 4), (64, 16, 4, 1))
buf12 = empty_strided_cuda((16,), (1,), torch.float32)
buf11 = buf10
del buf10
        buf13 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf16 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4[grid
(16)](buf11, primals_8, primals_7, primals_9, primals_1, buf12,
buf13, buf17, buf16, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
del primals_1
del primals_7
del primals_8
del primals_9
return (buf17, primals_2, primals_6, buf0, buf2, buf3, buf4, buf5, buf8,
buf9, buf11, buf12, reinterpret_tensor(buf16, (16,), (1,), 0),
reinterpret_tensor(buf13, (1, 16, 1, 1), (16, 1, 1, 1), 0))
class ConvLayer(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride):
super(ConvLayer, self).__init__()
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels,
kernel_size, stride)
def forward(self, x):
out = self.reflection_pad(x)
out = self.conv2d(out)
return out
class ResidualBlockNew(torch.nn.Module):
"""ResidualBlock
introduced in: https://arxiv.org/abs/1512.03385
recommended architecture: http://torch.ch/blog/2016/02/04/resnets.html
"""
def __init__(self, channels):
super(ResidualBlockNew, self).__init__()
self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in1 = torch.nn.InstanceNorm2d(channels, affine=True)
self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in2 = torch.nn.InstanceNorm2d(channels, affine=True)
self.relu = torch.nn.ReLU()
def forward(self, input_0):
primals_2 = self.conv1.conv2d.weight
primals_3 = self.conv1.conv2d.bias
primals_4 = self.in1.weight
primals_5 = self.in1.bias
primals_6 = self.conv2.conv2d.weight
primals_7 = self.conv2.conv2d.bias
primals_8 = self.in2.weight
primals_9 = self.in2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
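# Hedged usage sketch (illustrative, not part of the generated module): the
# wrapper can be exercised with the shape asserted in call(), i.e. a 4-channel
# 4x4 input on CUDA. Underscore-prefixed names are ad hoc, not from the source.
if __name__ == '__main__':
    if torch.cuda.is_available():
        _block = ResidualBlockNew(channels=4).cuda()
        _y = _block(torch.rand(4, 4, 4, 4, device='cuda'))
        print(_y.shape)  # expected: torch.Size([4, 4, 4, 4])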
|
EdenBD/MultiModalStory-demo
|
ResidualBlock
| false
| 13,644
|
[
"Apache-2.0"
] | 154
|
5e95e2aca766ca7c850e8db4973b8d51dfdba7f8
|
https://github.com/EdenBD/MultiModalStory-demo/tree/5e95e2aca766ca7c850e8db4973b8d51dfdba7f8
|
Classifier
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/nr/cnrkptzsuv7qm3ss6i6xgoxkou23z76h2vmwqkwz2zkgpdbxhedc.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%squeeze, [-1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/32/c32vfxouqe74ea5scuzrdhpd7r6adxwu4bzarm4icjfnb47jbizg.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => exp, log, sub_1, sum_1
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tx], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
del buf1
return (buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
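# Kernel below: first half of log_softmax over the last dimension of size 4 --
# subtracts the row maximum from every element for numerical stability.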
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
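# Kernel below: second half of log_softmax -- subtracts log(sum(exp(...)))
# of each row of 4 shifted values.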
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused__log_softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del buf1
return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2
class ClassifierNew(nn.Module):
def __init__(self, n_hid, n_out):
super(ClassifierNew, self).__init__()
self.n_hid = n_hid
self.n_out = n_out
self.linear = nn.Linear(n_hid, n_out)
def __repr__(self):
return '{}(n_hid={}, n_out={})'.format(self.__class__.__name__,
self.n_hid, self.n_out)
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
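# Hedged usage sketch (illustrative only): call() asserts a (4, 4) weight and a
# (4, 4, 4, 4) input, which corresponds to ClassifierNew(n_hid=4, n_out=4).
if __name__ == '__main__':
    if torch.cuda.is_available():
        _clf = ClassifierNew(n_hid=4, n_out=4).cuda()
        _log_probs = _clf(torch.rand(4, 4, 4, 4, device='cuda'))
        print(_log_probs.shape)  # expected: torch.Size([4, 4, 4, 4])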
|
FengMingquan-sjtu/pyHGT
|
Classifier
| false
| 9,034
|
[
"MIT"
] | 0
|
3ad1b10ee11358c02fa199667a80c291323e5e2d
|
https://github.com/FengMingquan-sjtu/pyHGT/tree/3ad1b10ee11358c02fa199667a80c291323e5e2d
|
Scale
|
import torch
import torch.nn as nn
class Scale(nn.Module):
def __init__(self, scale=1.0):
super(Scale, self).__init__()
self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float))
def forward(self, x):
return x * self.scale
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
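# Kernel below: multiplies every element of the input by the scalar scale
# parameter, broadcast from a 0-d tensor.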
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](primals_2, primals_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
return buf0, primals_2
class ScaleNew(nn.Module):
def __init__(self, scale=1.0):
super(ScaleNew, self).__init__()
self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float))
def forward(self, input_0):
primals_1 = self.scale
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
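# Hedged usage sketch (illustrative only): matches the 0-d parameter and
# (4, 4, 4, 4) input asserted in call(); the result should equal x * scale.
if __name__ == '__main__':
    if torch.cuda.is_available():
        _scale = ScaleNew(scale=2.0).cuda()
        _x = torch.rand(4, 4, 4, 4, device='cuda')
        print(torch.allclose(_scale(_x), _x * 2.0))  # expected: True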
|
Complicateddd/Complicateddd-ROITransformer
|
Scale
| false
| 11,308
|
[
"Apache-2.0"
] | 0
|
2adfbf98892d569c460d100c6e2169c5fa3a9b82
|
https://github.com/Complicateddd/Complicateddd-ROITransformer/tree/2adfbf98892d569c460d100c6e2169c5fa3a9b82
|
Flatten
|
import torch
from torch import nn
from torch.autograd import *
from itertools import product as product
from math import sqrt as sqrt
class Flatten(nn.Module):
def __init__(self):
super(Flatten, self).__init__()
def forward(self, x):
x = x.transpose(3, 2).contiguous()
return x.view(x.size(0), -1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from torch.autograd import *
from itertools import product as product
from math import sqrt as sqrt
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
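# Kernel below: materializes a contiguous copy of x with its last two
# dimensions swapped (the x.transpose(3, 2).contiguous() step), so the buffer
# can then be reinterpreted as a (4, 64) view.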
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64, 4)](arg0_1, buf0, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 64), (64, 1), 0),
class FlattenNew(nn.Module):
def __init__(self):
super(FlattenNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
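# Hedged usage sketch (illustrative only): with the (4, 4, 4, 4) input asserted
# in call(), the wrapper returns the same values as the eager transpose + view.
if __name__ == '__main__':
    if torch.cuda.is_available():
        _x = torch.rand(4, 4, 4, 4, device='cuda')
        _out = FlattenNew()(_x)
        print(_out.shape)  # expected: torch.Size([4, 64])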
|
Aristochi/Dangerous_driving_behavior_detection
|
Flatten
| false
| 13,268
|
[
"MIT"
] | 96
|
596d0544c3ed8cbfbc322cc4cd7859a9ef539810
|
https://github.com/Aristochi/Dangerous_driving_behavior_detection/tree/596d0544c3ed8cbfbc322cc4cd7859a9ef539810
|
Attention
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/xl/cxlpplg3hmt6k4x6alhg4yn6eq5jppxhzrzdcvvcpbupy7pjgudn.py
# Topologically Sorted Source Nodes: [score_1, max_1, src_max, sub, out], Original ATen: [aten.div, aten.max, aten.clamp, aten.sub, aten.exp]
# Source node to ATen node mapping:
# max_1 => max_1
# out => exp
# score_1 => div
# src_max => clamp_min
# sub => sub
# Graph fragment:
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_2, 2.0), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%div, -1, True), kwargs = {})
# %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%getitem, 0.0), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %clamp_min), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused_clamp_div_exp_max_sub_0 = async_compile.triton('triton_poi_fused_clamp_div_exp_max_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_exp_max_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_div_exp_max_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = 0.0
tmp15 = triton_helpers.maximum(tmp13, tmp14)
tmp16 = tmp2 - tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/fi/cfijnjpiz4ruggqhl6zhj4ujuexfsuzxvpo26muzj4bggik4i5hl.py
# Topologically Sorted Source Nodes: [score_1, max_1, src_max, sum_1, sub_1, exp_1, add], Original ATen: [aten.div, aten.max, aten.clamp, aten.sum, aten.rsub, aten.exp, aten.add]
# Source node to ATen node mapping:
# add => add
# exp_1 => exp_1
# max_1 => max_1
# score_1 => div
# src_max => clamp_min
# sub_1 => sub_1
# sum_1 => sum_1
# Graph fragment:
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_2, 2.0), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%div, -1, True), kwargs = {})
# %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%getitem, 0.0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (0.0, %clamp_min), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %exp_1), kwargs = {})
triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1 = async_compile.triton('triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = 0.5
tmp9 = tmp7 * tmp8
tmp11 = tmp10 * tmp8
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tmp13 * tmp8
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp17 = tmp16 * tmp8
tmp18 = triton_helpers.maximum(tmp15, tmp17)
tmp19 = 0.0
tmp20 = triton_helpers.maximum(tmp18, tmp19)
tmp21 = tmp19 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp6 + tmp22
tl.store(out_ptr0 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/h7/ch7xj7aj6agx6frik7qd7tffe2pmrsjjensiwx2hy2md4kde7aj5.py
# Topologically Sorted Source Nodes: [score_1, max_1, src_max, sum_1, sub_1, exp_1, add, out_1], Original ATen: [aten.div, aten.max, aten.clamp, aten.sum, aten.rsub, aten.exp, aten.add]
# Source node to ATen node mapping:
# add => add
# exp_1 => exp_1
# max_1 => max_1
# out_1 => div_1
# score_1 => div
# src_max => clamp_min
# sub_1 => sub_1
# sum_1 => sum_1
# Graph fragment:
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_2, 2.0), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%div, -1, True), kwargs = {})
# %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%getitem, 0.0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (0.0, %clamp_min), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %exp_1), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %add), kwargs = {})
triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2 = async_compile.triton('triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 / tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg1_1, (16, 4, 4), (16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [score_1, max_1, src_max, sub, out], Original ATen: [aten.div, aten.max, aten.clamp, aten.sub, aten.exp]
stream0 = get_raw_stream(0)
triton_poi_fused_clamp_div_exp_max_sub_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [score_1, max_1, src_max, sum_1, sub_1, exp_1, add], Original ATen: [aten.div, aten.max, aten.clamp, aten.sum, aten.rsub, aten.exp, aten.add]
triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1.run(buf1, buf0, buf2, 64, grid=grid(64), stream=stream0)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [score_1, max_1, src_max, sum_1, sub_1, exp_1, add, out_1], Original ATen: [aten.div, aten.max, aten.clamp, aten.sum, aten.rsub, aten.exp, aten.add]
triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2.run(buf3, buf2, 256, grid=grid(256), stream=stream0)
del buf2
buf4 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf4)
del arg2_1
del buf3
return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn.functional as F
import torch.fx
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
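# Kernel below: scales each attention score by 1/sqrt(d) = 0.5, clamps the row
# maximum at 0, subtracts it, and exponentiates (the restricted_softmax numerator).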
@triton.jit
def triton_poi_fused_clamp_div_exp_max_sub_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = 0.0
tmp15 = triton_helpers.maximum(tmp13, tmp14)
tmp16 = tmp2 - tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
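# Kernel below: computes the restricted_softmax denominator -- the row sum of
# the exponentials plus exp(margin - clamped row max), with margin = 0.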
@triton.jit
def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = 0.5
tmp9 = tmp7 * tmp8
tmp11 = tmp10 * tmp8
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tmp13 * tmp8
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp17 = tmp16 * tmp8
tmp18 = triton_helpers.maximum(tmp15, tmp17)
tmp19 = 0.0
tmp20 = triton_helpers.maximum(tmp18, tmp19)
tmp21 = tmp19 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp6 + tmp22
tl.store(out_ptr0 + x0, tmp23, xmask)
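# Kernel below: divides each exponentiated score by its row's denominator,
# finishing the restricted_softmax normalization in place.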
@triton.jit
def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2(in_out_ptr0, in_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 / tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(arg1_1, (16, 4, 4), (16, 1, 4), 0),
out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_div_exp_max_sub_0[grid(256)](buf0, buf1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1[grid(64)](buf1,
buf0, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf3 = buf1
del buf1
triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2[grid(256)](buf3,
buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf2
buf4 = buf0
del buf0
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf4
)
del arg2_1
del buf3
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0),
def restricted_softmax(src, dim: 'int'=-1, margin: 'float'=0.0):
src_max = torch.clamp(src.max(dim=dim, keepdim=True)[0], min=0.0)
out = (src - src_max).exp()
out = out / (out.sum(dim=dim, keepdim=True) + (margin - src_max).exp())
return out
class AttentionNew(torch.nn.Module):
def __init__(self, dropout=0):
super(AttentionNew, self).__init__()
self.dropout = dropout
def compute_attention(self, query, key, value):
assert query.dim() == key.dim() == value.dim() >= 2
assert query.size(-1) == key.size(-1)
assert key.size(-2) == value.size(-2)
score = torch.matmul(query, key.transpose(-2, -1))
score = score / math.sqrt(key.size(-1))
score = restricted_softmax(score, dim=-1)
score = F.dropout(score, p=self.dropout, training=self.training)
return torch.matmul(score, value)
def __repr__(self):
return '{}(dropout={})'.format(self.__class__.__name__, self.dropout)
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
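# Hedged usage sketch (illustrative only): query, key and value must all match
# the (4, 4, 4, 4) shape asserted in call(); note that call() applies no
# dropout, consistent with dropout=0.
if __name__ == '__main__':
    if torch.cuda.is_available():
        _attn = AttentionNew(dropout=0)
        _q = torch.rand(4, 4, 4, 4, device='cuda')
        _k = torch.rand(4, 4, 4, 4, device='cuda')
        _v = torch.rand(4, 4, 4, 4, device='cuda')
        print(_attn(_q, _k, _v).shape)  # expected: torch.Size([4, 4, 4, 4])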
|
HWSelf/pytorch_geometric
|
Attention
| false
| 516
|
[
"MIT"
] | 0
|
c1214de674079b5e39e57c045d0f844b60caf590
|
https://github.com/HWSelf/pytorch_geometric/tree/c1214de674079b5e39e57c045d0f844b60caf590
|
DownsampleA
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_2/inductor_cache/ut/cut6fylhkap4kiisa64e7qmjz6avx2msrgllo32mmvmznmfo5z5m.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%avg_pool2d, %mul], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 8
x0 = xindex % 16
x2 = (xindex // 128)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = 1.0
tmp7 = tmp5 * tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 8, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tl.load(in_ptr0 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp10 & xmask, other=0.0)
tmp14 = 0.0
tmp15 = tmp13 * tmp14
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp10, tmp15, tmp16)
tmp18 = tl.where(tmp4, tmp9, tmp17)
tl.store(out_ptr0 + (x3), tmp18, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(arg0_1, buf0, 512, grid=grid(512), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
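# Kernel below: writes the channel-wise concatenation of the input with a
# zeroed copy of itself (x * 0), doubling the channel count from 4 to 8.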
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = 1.0
tmp7 = tmp5 * tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp13 = tl.load(in_ptr0 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp10 &
xmask, other=0.0)
tmp14 = 0.0
tmp15 = tmp13 * tmp14
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp10, tmp15, tmp16)
tmp18 = tl.where(tmp4, tmp9, tmp17)
tl.store(out_ptr0 + x3, tmp18, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](arg0_1, buf0, 512, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class DownsampleANew(nn.Module):
def __init__(self, nIn, nOut, stride):
super(DownsampleANew, self).__init__()
self.avg = nn.AvgPool2d(kernel_size=1, stride=stride)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
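# Hedged usage sketch (illustrative only): the compiled path is specialized for
# a (4, 4, 4, 4) input with unchanged spatial size, so nIn=4, nOut=8, stride=1
# are assumed here.
if __name__ == '__main__':
    if torch.cuda.is_available():
        _down = DownsampleANew(nIn=4, nOut=8, stride=1)
        _y = _down(torch.rand(4, 4, 4, 4, device='cuda'))
        print(_y.shape)  # expected: torch.Size([4, 8, 4, 4])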
|
OBA9k/Test_dev
|
DownsampleA
| false
| 17,754
|
[
"Apache-2.0"
] | 4
|
bfdd337fb56ca160e1d09b6c310d1e6037d55fcd
|
https://github.com/OBA9k/Test_dev/tree/bfdd337fb56ca160e1d09b6c310d1e6037d55fcd
|
Attention
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/iu/ciuxern2omgit5ovksuiwlddxkww6e3pkid4q2h3sauzn5rbd35z.py
# Topologically Sorted Source Nodes: [ref_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# ref_1 => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_5, %primals_6, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/to/ctohnag3s2my72gfje47knfyigkawenmq4hwyddhmhls6qicb3io.py
# Topologically Sorted Source Nodes: [ref_1, expanded_query, add, tanh], Original ATen: [aten.convolution, aten.repeat, aten.add, aten.tanh]
# Source node to ATen node mapping:
# add => add
# expanded_query => repeat
# ref_1 => convolution
# tanh => tanh
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_5, %primals_6, [1], [0], [1], False, [0], 1), kwargs = {})
# %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%unsqueeze, [1, 1, 4]), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%repeat, %convolution), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {})
triton_poi_fused_add_convolution_repeat_tanh_1 = async_compile.triton('triton_poi_fused_add_convolution_repeat_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_repeat_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_repeat_tanh_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = (xindex // 4) % 4
x3 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp3 + tmp2
tmp5 = libdevice.tanh(tmp4)
tl.store(in_out_ptr0 + (x4), tmp2, xmask)
tl.store(out_ptr0 + (x4), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/i5/ci57psuuueutwfqpm57dmpddhnflxjjxpqzf6cwcsnd2zbemfstl.py
# Topologically Sorted Source Nodes: [V], Original ATen: [aten.repeat]
# Source node to ATen node mapping:
# V => repeat_1
# Graph fragment:
# %repeat_1 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%unsqueeze_2, [4, 1, 1]), kwargs = {})
triton_poi_fused_repeat_2 = async_compile.triton('triton_poi_fused_repeat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, primals_4, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [ref_1], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(primals_1, buf1, 16, 4, grid=grid(16, 4), stream=stream0)
# Topologically Sorted Source Nodes: [ref_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_5, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4), (16, 4, 1))
buf3 = buf2; del buf2 # reuse
buf4 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [ref_1, expanded_query, add, tanh], Original ATen: [aten.convolution, aten.repeat, aten.add, aten.tanh]
triton_poi_fused_add_convolution_repeat_tanh_1.run(buf3, primals_6, buf0, buf4, 64, grid=grid(64), stream=stream0)
del primals_6
buf5 = reinterpret_tensor(buf0, (4, 1, 4), (4, 16, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [V], Original ATen: [aten.repeat]
triton_poi_fused_repeat_2.run(primals_7, buf5, 16, grid=grid(16), stream=stream0)
del primals_7
buf6 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [V, bmm], Original ATen: [aten.repeat, aten.bmm]
extern_kernels.bmm(buf5, buf4, out=buf6)
return (buf3, reinterpret_tensor(buf6, (4, 4), (4, 1), 0), primals_4, primals_5, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), buf4, reinterpret_tensor(buf5, (4, 4, 1), (4, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_convolution_repeat_tanh_1(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = xindex // 4 % 4
x3 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp3 + tmp2
tmp5 = libdevice.tanh(tmp4)
tl.store(in_out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr0 + x4, tmp5, xmask)
@triton.jit
def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, primals_4, reinterpret_tensor(
primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf1, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_5, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4), (16, 4, 1))
buf3 = buf2
del buf2
buf4 = buf1
del buf1
triton_poi_fused_add_convolution_repeat_tanh_1[grid(64)](buf3,
primals_6, buf0, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_6
buf5 = reinterpret_tensor(buf0, (4, 1, 4), (4, 16, 1), 0)
del buf0
triton_poi_fused_repeat_2[grid(16)](primals_7, buf5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(buf5, buf4, out=buf6)
return buf3, reinterpret_tensor(buf6, (4, 4), (4, 1), 0
), primals_4, primals_5, reinterpret_tensor(primals_1, (4, 4, 4), (
16, 1, 4), 0), buf4, reinterpret_tensor(buf5, (4, 4, 1), (4, 1, 4), 0)
class AttentionNew(nn.Module):
"""
    Supports two types of attention mechanism: "Dot" and "Bahdanau"
"""
def __init__(self, hidden_size, use_tanh=False, C=10, name='Bahdanau',
use_cuda=True):
super(AttentionNew, self).__init__()
self.use_tanh = use_tanh
self.C = C
self.name = name
if name == 'Bahdanau':
self.W_query = nn.Linear(hidden_size, hidden_size)
self.W_ref = nn.Conv1d(hidden_size, hidden_size, 1, 1)
V = torch.FloatTensor(hidden_size)
if use_cuda:
V = V
self.V = nn.Parameter(V)
self.V.data.uniform_(-(1.0 / math.sqrt(hidden_size)), 1.0 /
math.sqrt(hidden_size))
def forward(self, input_0, input_1):
primals_3 = self.V
primals_2 = self.W_query.weight
primals_6 = self.W_query.bias
primals_5 = self.W_ref.weight
primals_7 = self.W_ref.bias
primals_4 = input_0
primals_1 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1]
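# Illustrative usage sketch (added for clarity; not part of the upstream entry).
# Shapes follow the asserts in `call` above: query (4, 4), ref (4, 4, 4), hidden_size=4.
if __name__ == '__main__' and torch.cuda.is_available():
    attn = AttentionNew(hidden_size=4, use_cuda=True).cuda()
    query = torch.rand(4, 4, device='cuda')
    ref = torch.rand(4, 4, 4, device='cuda')
    out_ref, out_logits = attn(query, ref)
    print(out_ref.shape, out_logits.shape)  # expected: (4, 4, 4) and (4, 4)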
|
Lance0226/CIS700_Convex_Hull_RL
|
Attention
| false
| 2,502
|
[
"MIT"
] | 0
|
3c87e063209d535d75fde719bf17f20dd5e68635
|
https://github.com/Lance0226/CIS700_Convex_Hull_RL/tree/3c87e063209d535d75fde719bf17f20dd5e68635
|
Network
|
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.nn.init as I
class Network(nn.Module):
"""
Q-network
"""
def __init__(self, state_size, action_size, seed, fc1_units=64,
fc2_units=32):
"""
        Build the model and initialize it
Params
======
state_size (int) : State space size
action_size (int) : Action space size
seed (int) : Random seed
            fc1_units (int) : Size of the first hidden layer
            fc2_units (int) : Size of the second hidden layer
"""
super(Network, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc1_units)
self.fc2 = nn.Linear(fc1_units, fc2_units)
self.fc3 = nn.Linear(fc2_units, action_size)
self.reset_parameters()
def reset_parameters(self):
"""
Initialize parameters of the layers
xavier_normal is used.
See "Understanding the difficulty of training deep feedforward neural networks" - Glorot, X. & Bengio, Y. (2010) for details.
"""
for m in self.modules():
if isinstance(m, nn.Linear):
I.xavier_normal_(m.weight)
def forward(self, state):
"""
Forward pass state -> action
Params
======
state (Torch Tensor) [batch_size, state_size]: state vector
Returns
======
actions (Torch Tensor) [batch_size, action_size]: action values
"""
x = F.relu(self.fc1(state))
x = F.relu(self.fc2(x))
actions = F.relu(self.fc3(x))
return actions
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
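# Illustrative usage sketch (added for clarity; not part of the upstream file).
# Mirrors get_init_inputs() above: state_size=4, action_size=4 (seed chosen arbitrarily).
if __name__ == '__main__':
    net = Network(state_size=4, action_size=4, seed=0)
    state = torch.rand(4, 4)   # [batch_size, state_size]
    q_values = net(state)      # [batch_size, action_size]
    print(q_values.shape)      # torch.Size([4, 4])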
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.init as I
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (64, 4), (4, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (32, 64), (64, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (4, 32), (32, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf0
buf8 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool
)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf1,
primals_2, buf8, 4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0),
reinterpret_tensor(primals_4, (64, 32), (1, 64), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 32), (512, 128, 32, 1), 0)
del buf2
buf7 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(2048)](buf3,
primals_5, buf7, 2048, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 32), (32, 1), 0),
reinterpret_tensor(primals_6, (32, 4), (1, 32), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(256)](buf5,
primals_7, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(
buf3, (64, 32), (32, 1), 0), buf6, primals_6, buf7, primals_4, buf8
class NetworkNew(nn.Module):
"""
Q-network
"""
def __init__(self, state_size, action_size, seed, fc1_units=64,
fc2_units=32):
"""
        Build the model and initialize it
Params
======
state_size (int) : State space size
action_size (int) : Action space size
seed (int) : Random seed
            fc1_units (int) : Size of the first hidden layer
            fc2_units (int) : Size of the second hidden layer
"""
super(NetworkNew, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc1_units)
self.fc2 = nn.Linear(fc1_units, fc2_units)
self.fc3 = nn.Linear(fc2_units, action_size)
self.reset_parameters()
def reset_parameters(self):
"""
Initialize parameters of the layers
xavier_normal is used.
See "Understanding the difficulty of training deep feedforward neural networks" - Glorot, X. & Bengio, Y. (2010) for details.
"""
for m in self.modules():
if isinstance(m, nn.Linear):
I.xavier_normal_(m.weight)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
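# Sanity-check sketch (added for illustration; assumes a CUDA device with Triton available).
# The fused kernels add the bias and apply ReLU after each matmul, so the output should
# match the eager stack relu(fc3(relu(fc2(relu(fc1(x)))))) built from the same layers.
if __name__ == '__main__' and torch.cuda.is_available():
    net = NetworkNew(state_size=4, action_size=4, seed=0).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y_fused = net(x)
    y_eager = torch.relu(net.fc3(torch.relu(net.fc2(torch.relu(net.fc1(x))))))
    print(torch.allclose(y_fused, y_eager, atol=1e-5))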
|
tae-yeop/Udacity_DRLND_navigation
|
Network
| false
| 10,839
|
[
"MIT"
] | 0
|
dd4a4609c5fe3e00cb4deea3ebd9922dd0772447
|
https://github.com/tae-yeop/Udacity_DRLND_navigation/tree/dd4a4609c5fe3e00cb4deea3ebd9922dd0772447
|
ConvGelu
|
import torch
import torch.nn as nn
import torch.fx
class ConvGelu(torch.nn.Module):
def __init__(self):
super(ConvGelu, self).__init__()
self.conv = nn.Conv2d(3, 32, 3, 1)
self.gelu = nn.GELU()
def forward(self, x):
x = self.conv(x)
x = self.gelu(x)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
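# Illustrative usage sketch (added for clarity; not part of the upstream file).
if __name__ == '__main__':
    model = ConvGelu()
    out = model(torch.rand(4, 3, 64, 64))
    print(out.shape)  # torch.Size([4, 32, 62, 62]): 3x3 conv, stride 1, no padding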
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.fx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_gelu_0(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 492032
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3844 % 32
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = 0.7071067811865476
tmp6 = tmp2 * tmp5
tmp7 = libdevice.erf(tmp6)
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tmp4 * tmp9
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 62, 62), (123008, 3844, 62, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 32, 62, 62), (123008, 3844, 62, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_gelu_0[grid(492032)](buf1, primals_2,
buf2, 492032, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
return buf2, primals_1, primals_3, buf1
class ConvGeluNew(torch.nn.Module):
def __init__(self):
super(ConvGeluNew, self).__init__()
self.conv = nn.Conv2d(3, 32, 3, 1)
self.gelu = nn.GELU()
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
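# Sanity-check sketch (added for illustration; assumes a CUDA device with Triton available).
# The fused kernel applies exact (erf-based) GELU after the convolution:
# gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))).
if __name__ == '__main__' and torch.cuda.is_available():
    m = ConvGeluNew().cuda()
    x = torch.rand(4, 3, 64, 64, device='cuda')
    y_fused = m(x)
    y_eager = torch.nn.functional.gelu(m.conv(x))
    print(torch.allclose(y_fused, y_eager, atol=1e-5))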
|
NVIDIA/Torch-TensorRT
|
ConvGelu
| false
| 14,076
|
[
"BSD-3-Clause"
] | 430
|
1a22204fecec690bc3c2a318dab4f57b98c57f05
|
https://github.com/NVIDIA/Torch-TensorRT/tree/1a22204fecec690bc3c2a318dab4f57b98c57f05
|
CoFusion
|
import torch
import torch.nn.functional as F
import torch.nn as nn
class CoFusion(nn.Module):
def __init__(self, in_ch, out_ch):
super(CoFusion, self).__init__()
self.conv1 = nn.Conv2d(in_ch, 64, kernel_size=3, stride=1, padding=1)
self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
self.conv3 = nn.Conv2d(64, out_ch, kernel_size=3, stride=1, padding=1)
self.relu = nn.ReLU()
self.norm_layer1 = nn.GroupNorm(4, 64)
self.norm_layer2 = nn.GroupNorm(4, 64)
def forward(self, x):
attn = self.relu(self.norm_layer1(self.conv1(x)))
attn = self.relu(self.norm_layer2(self.conv2(attn)))
attn = F.softmax(self.conv3(attn), dim=1)
return (x * attn).sum(1).unsqueeze(1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_ch': 4, 'out_ch': 4}]
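# Illustrative usage sketch (added for clarity; not part of the upstream file).
if __name__ == '__main__':
    fuse = CoFusion(in_ch=4, out_ch=4)
    out = fuse(torch.rand(4, 4, 4, 4))
    print(out.shape)  # torch.Size([4, 1, 4, 4]): softmax-weighted sum over the channel dim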
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_convolution_native_group_norm_0(in_out_ptr0, in_ptr0,
out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r5 = rindex
x4 = xindex
r3 = rindex // 16
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r5 + 256 * x4), None)
tmp1 = tl.load(in_ptr0 + (r3 + 16 * x0), None, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = tl.broadcast_to(tmp3, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = tl.full([1], 256, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp3 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tl.store(in_out_ptr0 + (r5 + 256 * x4), tmp2, None)
tl.store(out_ptr2 + x4, tmp20, None)
tl.store(out_ptr0 + x4, tmp10, None)
tl.store(out_ptr1 + x4, tmp15, None)
@triton.jit
def triton_poi_fused_native_group_norm_relu_1(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x4 = xindex // 16
x1 = xindex // 16 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x4 // 16, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x4 // 16, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x1, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 256.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_mul_sum_4(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp2 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp4 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp6 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp10 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp14 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp18 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp1 / tmp7
tmp9 = tmp0 * tmp8
tmp11 = tmp2 / tmp7
tmp12 = tmp10 * tmp11
tmp13 = tmp9 + tmp12
tmp15 = tmp4 / tmp7
tmp16 = tmp14 * tmp15
tmp17 = tmp13 + tmp16
tmp19 = tmp6 / tmp7
tmp20 = tmp18 * tmp19
tmp21 = tmp17 + tmp20
tl.store(out_ptr0 + x2, tmp21, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (64, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (64,), (1,))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (64,), (1,))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (4, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 4, 4), (1024, 16, 4, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
get_raw_stream(0)
triton_per_fused_convolution_native_group_norm_0[grid(16)](buf1,
primals_2, buf2, buf3, buf5, 16, 256, num_warps=2, num_stages=1)
del primals_2
buf6 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.
float32)
triton_poi_fused_native_group_norm_relu_1[grid(4096)](buf1, buf2,
buf3, primals_4, primals_5, buf6, 4096, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_5
buf7 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 64, 4, 4), (1024, 16, 4, 1))
buf8 = buf7
del buf7
buf9 = buf3
del buf3
buf10 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf12 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
triton_per_fused_convolution_native_group_norm_0[grid(16)](buf8,
primals_7, buf9, buf10, buf12, 16, 256, num_warps=2, num_stages=1)
del primals_7
buf13 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.
float32)
triton_poi_fused_native_group_norm_relu_1[grid(4096)](buf8, buf9,
buf10, primals_8, primals_9, buf13, 4096, XBLOCK=128, num_warps
=4, num_stages=1)
del buf10
del primals_9
buf14 = extern_kernels.convolution(buf13, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 4, 4, 4), (64, 16, 4, 1))
buf15 = buf14
del buf14
triton_poi_fused_convolution_2[grid(256)](buf15, primals_11, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_11
buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(256)](buf15, buf16, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_mul_sum_4[grid(64)](primals_3, buf16,
buf17, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf16
return (reinterpret_tensor(buf17, (4, 1, 4, 4), (16, 16, 4, 1), 0),
primals_1, primals_3, primals_4, primals_6, primals_8, primals_10,
buf1, reinterpret_tensor(buf2, (4, 4), (4, 1), 0),
reinterpret_tensor(buf5, (4, 4), (4, 1), 0), buf6, buf8,
reinterpret_tensor(buf9, (4, 4), (4, 1), 0), reinterpret_tensor(
buf12, (4, 4), (4, 1), 0), buf13, buf15)
class CoFusionNew(nn.Module):
def __init__(self, in_ch, out_ch):
super(CoFusionNew, self).__init__()
self.conv1 = nn.Conv2d(in_ch, 64, kernel_size=3, stride=1, padding=1)
self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
self.conv3 = nn.Conv2d(64, out_ch, kernel_size=3, stride=1, padding=1)
self.relu = nn.ReLU()
self.norm_layer1 = nn.GroupNorm(4, 64)
self.norm_layer2 = nn.GroupNorm(4, 64)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_6 = self.conv2.weight
primals_4 = self.conv2.bias
primals_10 = self.conv3.weight
primals_11 = self.conv3.bias
primals_5 = self.norm_layer1.weight
primals_7 = self.norm_layer1.bias
primals_8 = self.norm_layer2.weight
primals_9 = self.norm_layer2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
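# Smoke-test sketch (added for illustration; assumes a CUDA device with Triton available).
if __name__ == '__main__' and torch.cuda.is_available():
    m = CoFusionNew(4, 4).cuda()
    out = m(torch.rand(4, 4, 4, 4, device='cuda'))
    print(out.shape)  # torch.Size([4, 1, 4, 4]), matching the eager CoFusion output shape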
|
jechague/DexiNed
|
CoFusion
| false
| 15,687
|
[
"MIT"
] | 471
|
370fe9031579b2d815ab706d7dc9daf23b969a87
|
https://github.com/jechague/DexiNed/tree/370fe9031579b2d815ab706d7dc9daf23b969a87
|
BertLayer
|
from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BertSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transform(self, x, linear_layer):
bs, seq_len = x.shape[:2]
proj = linear_layer(x)
proj = proj.view(bs, seq_len, self.num_attention_heads, self.
attention_head_size)
proj = proj.transpose(1, 2)
return proj
def attention(self, key, query, value, attention_mask):
scores = torch.matmul(query, torch.transpose(key, 2, 3)) / math.sqrt(
key.shape[-1])
scores = scores.masked_fill(attention_mask < 0, -10000)
normed = torch.softmax(scores, -1)
per_head = torch.matmul(normed, value)
atten = torch.cat([per_head[:, i, :, :] for i in range(per_head.
shape[1])], -1)
return atten
def forward(self, hidden_states, attention_mask):
"""
hidden_states: [bs, seq_len, hidden_state]
attention_mask: [bs, 1, 1, seq_len]
output: [bs, seq_len, hidden_state]
"""
key_layer = self.transform(hidden_states, self.key)
value_layer = self.transform(hidden_states, self.value)
query_layer = self.transform(hidden_states, self.query)
attn_value = self.attention(key_layer, query_layer, value_layer,
attention_mask)
return attn_value
class BertLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.self_attention = BertSelfAttention(config)
self.attention_dense = nn.Linear(config.hidden_size, config.hidden_size
)
self.attention_layer_norm = nn.LayerNorm(config.hidden_size, eps=
config.layer_norm_eps)
self.attention_dropout = nn.Dropout(config.hidden_dropout_prob)
self.interm_dense = nn.Linear(config.hidden_size, config.
intermediate_size)
self.interm_af = F.gelu
self.out_dense = nn.Linear(config.intermediate_size, config.hidden_size
)
self.out_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.
layer_norm_eps)
self.out_dropout = nn.Dropout(config.hidden_dropout_prob)
def add_norm(self, input, output, dense_layer, dropout, ln_layer):
"""
input: the input
        output: the tensor to be transformed by the sublayer
        dense_layer, dropout: the sublayer
ln_layer: layer norm that takes input+sublayer(output)
"""
return ln_layer(input + dropout(dense_layer(output)))
def forward(self, hidden_states, attention_mask):
"""
hidden_states: either from the embedding layer (first bert layer) or from the previous bert layer
as shown in the left of Figure 1 of https://arxiv.org/pdf/1706.03762.pdf
each block consists of
1. a multi-head attention layer (BertSelfAttention)
        2. an add-norm that takes the output of BertSelfAttention and the input of BertSelfAttention
        3. a feed-forward layer
        4. an add-norm that takes the output of the feed-forward layer and the input of the feed-forward layer
"""
atten = self.self_attention(hidden_states, attention_mask)
norm_atten = self.add_norm(hidden_states, atten, self.
attention_dense, self.attention_dropout, self.attention_layer_norm)
interim = self.interm_af(self.interm_dense(norm_atten))
ffn = self.add_norm(norm_atten, interim, self.out_dense, self.
out_dropout, self.out_layer_norm)
return ffn
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(num_attention_heads=4, hidden_size=
4, attention_probs_dropout_prob=0.5, layer_norm_eps=1,
hidden_dropout_prob=0.5, intermediate_size=4)}]
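# Illustrative usage sketch (added for clarity; not part of the upstream file).
# Uses a plain SimpleNamespace in place of _mock_config, with the same
# hyper-parameters as get_init_inputs() above; eval() disables the dropouts.
if __name__ == '__main__':
    from types import SimpleNamespace
    cfg = SimpleNamespace(num_attention_heads=4, hidden_size=4,
        attention_probs_dropout_prob=0.5, layer_norm_eps=1,
        hidden_dropout_prob=0.5, intermediate_size=4)
    layer = BertLayer(cfg).eval()
    hidden = torch.rand(4, 4, 4)      # [bs, seq_len, hidden_state]
    mask = torch.zeros(4, 1, 1, 4)    # non-negative, so nothing gets masked
    print(layer(hidden, mask).shape)  # torch.Size([4, 4, 4])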
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_lt_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 < tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp7 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp12 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp17 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = -10000.0
tmp5 = tl.where(tmp0, tmp4, tmp3)
tmp8 = tmp7 * tmp2
tmp9 = tl.where(tmp6, tmp4, tmp8)
tmp10 = triton_helpers.maximum(tmp5, tmp9)
tmp13 = tmp12 * tmp2
tmp14 = tl.where(tmp11, tmp4, tmp13)
tmp15 = triton_helpers.maximum(tmp10, tmp14)
tmp18 = tmp17 * tmp2
tmp19 = tl.where(tmp16, tmp4, tmp18)
tmp20 = triton_helpers.maximum(tmp15, tmp19)
tmp21 = tmp5 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp9 - tmp20
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp20
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp19 - tmp20
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tl.store(out_ptr0 + x2, tmp20, xmask)
tl.store(out_ptr1 + x2, tmp31, xmask)
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_3(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 64
x4 = xindex
x5 = xindex // 4
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp1 = tl.load(in_out_ptr0 + x4, xmask)
tmp6 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = -10000.0
tmp5 = tl.where(tmp0, tmp4, tmp3)
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tl.store(in_out_ptr0 + x4, tmp10, xmask)
@triton.jit
def triton_poi_fused_cat_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x1 + 16 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + (4 + x1 + 16 * x2), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + (8 + x1 + 16 * x2), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tl.full([1], 4, tl.int64)
tmp19 = tl.load(in_ptr0 + (12 + x1 + 16 * x2), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tl.store(out_ptr0 + x3, tmp22, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_gelu_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_9(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1.0
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_10(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17, primals_18
) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4,), (1,))
assert_size_stride(primals_15, (4, 4), (4, 1))
assert_size_stride(primals_16, (4,), (1,))
assert_size_stride(primals_17, (4,), (1,))
assert_size_stride(primals_18, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_7, buf3, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_7
buf4 = reinterpret_tensor(buf2, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf2
triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, buf4, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_3
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_lt_1[grid(64)](primals_8, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_8
buf7 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf0
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_div_masked_fill_2[grid(64)](buf6, buf5,
buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_div_masked_fill_3[grid(256)](buf9, buf6,
buf7, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf10 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf8
triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_5, buf10, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_5
buf11 = reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11)
buf12 = reinterpret_tensor(buf7, (4, 4, 4), (16, 4, 1), 0)
del buf7
triton_poi_fused_cat_4[grid(64)](buf11, buf12, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0)
del buf11
extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf13)
del primals_10
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(16)](primals_1, buf13,
buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_6[grid(64)](primals_1, buf13,
buf14, buf15, primals_11, primals_12, buf16, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_12
buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_14, reinterpret_tensor(buf16, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf17)
del primals_14
buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_gelu_7[grid(64)](buf17, buf18, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf19)
buf20 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0)
del buf19
triton_poi_fused_add_8[grid(64)](buf20, buf16, primals_16, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_16
buf21 = buf15
del buf15
buf22 = buf14
del buf14
triton_poi_fused_native_layer_norm_9[grid(16)](buf20, buf21, buf22,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_10[grid(64)](buf20, buf21, buf22,
primals_17, primals_18, buf23, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf21
del buf22
del primals_18
return (buf23, primals_1, primals_11, primals_17, buf6, buf9,
reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13,
reinterpret_tensor(buf16, (16, 4), (4, 1), 0), buf17,
reinterpret_tensor(buf18, (16, 4), (4, 1), 0), buf20, primals_15,
primals_13, primals_9, reinterpret_tensor(buf10, (16, 1, 4), (4, 1,
1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0),
reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0))
class BertSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transform(self, x, linear_layer):
bs, seq_len = x.shape[:2]
proj = linear_layer(x)
proj = proj.view(bs, seq_len, self.num_attention_heads, self.
attention_head_size)
proj = proj.transpose(1, 2)
return proj
def attention(self, key, query, value, attention_mask):
scores = torch.matmul(query, torch.transpose(key, 2, 3)) / math.sqrt(
key.shape[-1])
scores = scores.masked_fill(attention_mask < 0, -10000)
normed = torch.softmax(scores, -1)
per_head = torch.matmul(normed, value)
atten = torch.cat([per_head[:, i, :, :] for i in range(per_head.
shape[1])], -1)
return atten
def forward(self, hidden_states, attention_mask):
"""
hidden_states: [bs, seq_len, hidden_state]
attention_mask: [bs, 1, 1, seq_len]
output: [bs, seq_len, hidden_state]
"""
key_layer = self.transform(hidden_states, self.key)
value_layer = self.transform(hidden_states, self.value)
query_layer = self.transform(hidden_states, self.query)
attn_value = self.attention(key_layer, query_layer, value_layer,
attention_mask)
return attn_value
class BertLayerNew(nn.Module):
def __init__(self, config):
super().__init__()
self.self_attention = BertSelfAttention(config)
self.attention_dense = nn.Linear(config.hidden_size, config.hidden_size
)
self.attention_layer_norm = nn.LayerNorm(config.hidden_size, eps=
config.layer_norm_eps)
self.attention_dropout = nn.Dropout(config.hidden_dropout_prob)
self.interm_dense = nn.Linear(config.hidden_size, config.
intermediate_size)
self.interm_af = F.gelu
self.out_dense = nn.Linear(config.intermediate_size, config.hidden_size
)
self.out_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.
layer_norm_eps)
self.out_dropout = nn.Dropout(config.hidden_dropout_prob)
def add_norm(self, input, output, dense_layer, dropout, ln_layer):
"""
input: the input
        output: the tensor to be transformed by the sublayer
        dense_layer, dropout: the sublayer
ln_layer: layer norm that takes input+sublayer(output)
"""
return ln_layer(input + dropout(dense_layer(output)))
def forward(self, input_0, input_1):
primals_2 = self.self_attention.query.weight
primals_3 = self.self_attention.query.bias
primals_4 = self.self_attention.key.weight
primals_5 = self.self_attention.key.bias
primals_6 = self.self_attention.value.weight
primals_7 = self.self_attention.value.bias
primals_9 = self.attention_dense.weight
primals_10 = self.attention_dense.bias
primals_11 = self.attention_layer_norm.weight
primals_12 = self.attention_layer_norm.bias
primals_13 = self.interm_dense.weight
primals_14 = self.interm_dense.bias
primals_15 = self.out_dense.weight
primals_16 = self.out_dense.bias
primals_17 = self.out_layer_norm.weight
primals_18 = self.out_layer_norm.bias
primals_1 = input_0
primals_8 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18])
return output[0]
|
priyamtejaswin/minbert-assignment
|
BertLayer
| false
| 10,693
|
[
"Apache-2.0"
] | 0
|
fd41a54441916a6d421640bbee910f64786b303d
|
https://github.com/priyamtejaswin/minbert-assignment/tree/fd41a54441916a6d421640bbee910f64786b303d
|
LayerNorm
|
import torch
import torch.cuda
from torch import nn
import torch.distributed
from torch.nn import LayerNorm
import torch.utils.data
import torch.optim
class LayerNorm(nn.Module):
def __init__(self, channels, eps=0.0001):
super().__init__()
self.channels = channels
self.eps = eps
self.gamma = nn.Parameter(torch.ones(channels))
self.beta = nn.Parameter(torch.zeros(channels))
def forward(self, x):
n_dims = len(x.shape)
mean = torch.mean(x, 1, keepdim=True)
variance = torch.mean((x - mean) ** 2, 1, keepdim=True)
x = (x - mean) * torch.rsqrt(variance + self.eps)
shape = [1, -1] + [1] * (n_dims - 2)
x = x * self.gamma.view(*shape) + self.beta.view(*shape)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
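# Illustrative usage sketch (added for clarity; not part of the upstream file).
# Unlike torch.nn.LayerNorm, this variant normalizes over dim=1 (channels).
if __name__ == '__main__':
    ln = LayerNorm(channels=4)
    y = ln(torch.rand(4, 4, 4, 4))
    print(y.shape, y.mean(dim=1).abs().max().item())  # shape preserved; channel means ~ 0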
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.cuda
from torch import nn
import torch.distributed
import torch.utils.data
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_mean_mul_pow_rsqrt_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = 0.0001
tmp15 = tmp13 + tmp14
tmp16 = libdevice.rsqrt(tmp15)
tmp17 = tmp0 * tmp16
tmp19 = tmp17 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + x3, tmp21, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_sub_0[grid(256)](primals_1, buf0, 256, XBLOCK
=128, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mean_mul_pow_rsqrt_1[grid(256)](buf0,
primals_2, primals_3, buf1, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf0
del primals_2
del primals_3
return buf1, primals_1
class LayerNormNew(nn.Module):
def __init__(self, channels, eps=0.0001):
super().__init__()
self.channels = channels
self.eps = eps
self.gamma = nn.Parameter(torch.ones(channels))
self.beta = nn.Parameter(torch.zeros(channels))
def forward(self, input_0):
primals_2 = self.gamma
primals_3 = self.beta
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
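A minimal equivalence check for the compiled module above, assuming the eager LayerNorm defined earlier in this section and LayerNormNew are both available in the same session and a CUDA device is present; this sketch is not part of the original benchmark harness.

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
eager = LayerNorm(channels=4).cuda()
compiled = LayerNormNew(channels=4).cuda()
# both start from gamma=1, beta=0; copy anyway so the comparison stays valid after training
compiled.gamma.data.copy_(eager.gamma.data)
compiled.beta.data.copy_(eager.beta.data)
print(torch.allclose(eager(x), compiled(x), atol=1e-5))  # expected: True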
|
Oreoluwa1234/NeMo
|
LayerNorm
| false
| 9,702
|
[
"Apache-2.0"
] | 0
|
b01e3ceed34efe31fd43866685dbdd19a6b30928
|
https://github.com/Oreoluwa1234/NeMo/tree/b01e3ceed34efe31fd43866685dbdd19a6b30928
|
GELU
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/ek/cekqmjotkcsacfdbkc7oinl4vltq6kmmxpvttcglsbg5ihsegnbb.py
# Topologically Sorted Source Nodes: [wrapped_sqrt, pow_1, mul, add, mul_1, tanh, add_1, cdf, mul_3], Original ATen: [aten.sqrt, aten.pow, aten.mul, aten.add, aten.tanh]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# cdf => mul_2
# mul => mul
# mul_1 => mul_1
# mul_3 => mul_3
# pow_1 => pow_1
# tanh => tanh
# wrapped_sqrt => full_default
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.7978845608028654), kwargs = {dtype: torch.float64, layout: torch.strided, device: cpu, pin_memory: False})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 3), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.044715), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %mul), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%full_default, %add), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%mul_1,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1.0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 0.5), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %mul_2), kwargs = {})
triton_poi_fused_add_mul_pow_sqrt_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_pow_sqrt_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_pow_sqrt_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_pow_sqrt_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tmp0 * tmp0
tmp2 = tmp1 * tmp0
tmp3 = 0.044715
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tmp6 = 0.7978845608028654
tmp7 = tmp6 * tmp5
tmp8 = libdevice.tanh(tmp7)
tmp9 = 1.0
tmp10 = tmp8 + tmp9
tmp11 = 0.5
tmp12 = tmp10 * tmp11
tmp13 = tmp0 * tmp12
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [wrapped_sqrt, pow_1, mul, add, mul_1, tanh, add_1, cdf, mul_3], Original ATen: [aten.sqrt, aten.pow, aten.mul, aten.add, aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_pow_sqrt_tanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
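For reference, a hedged eager-mode sketch of the element-wise function the fused kernel above computes (the tanh approximation of GELU); the helper name is an illustrative assumption.

import math
import torch

def gelu_tanh_reference(x: torch.Tensor) -> torch.Tensor:
    # mirrors tmp0..tmp13 in triton_poi_fused_add_mul_pow_sqrt_tanh_0:
    # x * 0.5 * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))
    return x * 0.5 * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x ** 3)))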
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_pow_sqrt_tanh_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 * tmp0
tmp2 = tmp1 * tmp0
tmp3 = 0.044715
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tmp6 = 0.7978845608028654
tmp7 = tmp6 * tmp5
tmp8 = libdevice.tanh(tmp7)
tmp9 = 1.0
tmp10 = tmp8 + tmp9
tmp11 = 0.5
tmp12 = tmp10 * tmp11
tmp13 = tmp0 * tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_pow_sqrt_tanh_0[grid(256)](arg0_1, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class GELUNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
YNNEKUW/fairseq
|
GELU
| false
| 11,989
|
[
"MIT"
] | 0
|
ef145b330ef26e7fb76609524504ab7933b88172
|
https://github.com/YNNEKUW/fairseq/tree/ef145b330ef26e7fb76609524504ab7933b88172
|
Actor
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/v7/cv7zazascu4rpkkwoxbiwk6c2le2e6wshdhae73bmaoapelvwguv.py
# Topologically Sorted Source Nodes: [a], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# a => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/cp/ccp5m5apf7ka2skqyfxhf2df54c52qocprpycry7jrzoptyjvbti.py
# Topologically Sorted Source Nodes: [tanh, a_2], Original ATen: [aten.tanh, aten.mul]
# Source node to ATen node mapping:
# a_2 => mul
# tanh => tanh
# Graph fragment:
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_5,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh, 4), kwargs = {})
triton_poi_fused_mul_tanh_1 = async_compile.triton('triton_poi_fused_mul_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = libdevice.tanh(tmp0)
tmp2 = 4.0
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (16, 4), (4, 1))
assert_size_stride(primals_2, (16, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (16, 16), (16, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (4, 16), (16, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 16), (256, 64, 16, 1), 0); del buf0 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool)
# Topologically Sorted Source Nodes: [a], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 1024, grid=grid(1024), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 16), (1, 16), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 16), (256, 64, 16, 1), 0); del buf2 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool)
# Topologically Sorted Source Nodes: [a_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf6, 1024, grid=grid(1024), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 16), (16, 1), 0), reinterpret_tensor(primals_6, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tanh, a_2], Original ATen: [aten.tanh, aten.mul]
triton_poi_fused_mul_tanh_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0)
return (buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 16), (16, 1), 0), reinterpret_tensor(buf3, (64, 16), (16, 1), 0), buf4, primals_6, buf6, primals_4, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_mul_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tmp2 = 4.0
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (16, 4), (4, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (16, 16), (16, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (4, 16), (16, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 16), (256, 64, 16, 1), 0)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(1024)](buf1,
primals_2, buf7, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 16), (16, 1), 0),
reinterpret_tensor(primals_4, (16, 16), (1, 16), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 16), (256, 64, 16, 1), 0)
del buf2
buf6 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(1024)](buf3,
primals_5, buf6, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 16),
(16, 1), 0), reinterpret_tensor(primals_6, (16, 4), (1, 16), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_tanh_1[grid(256)](buf4, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 16), (16, 1), 0), reinterpret_tensor(
buf3, (64, 16), (16, 1), 0), buf4, primals_6, buf6, primals_4, buf7
class ActorNew(nn.Module):
def __init__(self, state_dim, action_dim, action_range):
super().__init__()
self.fc1 = nn.Linear(state_dim, 16)
self.fc2 = nn.Linear(16, 16)
self.fc3 = nn.Linear(16, action_dim)
self.action_range = action_range
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
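A brief usage sketch for the compiled module above, with one caveat read directly off the code: triton_poi_fused_mul_tanh_1 hard-codes the factor 4.0, so this compiled path effectively ignores self.action_range and always scales the tanh output by 4 (the value the module was traced with). Shapes mirror the benchmark inputs and are illustrative.

import torch

actor = ActorNew(state_dim=4, action_dim=4, action_range=4).cuda()
state = torch.rand(4, 4, 4, 4, device='cuda')
action = actor(state)   # 4 * tanh(fc3(relu(fc2(relu(fc1(state))))))
print(action.shape)     # torch.Size([4, 4, 4, 4])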
|
ikamensh/machin
|
Actor
| false
| 6,854
|
[
"MIT"
] | 1
|
af7b423c47bc1412530cf6c96c11bd3af9b3e239
|
https://github.com/ikamensh/machin/tree/af7b423c47bc1412530cf6c96c11bd3af9b3e239
|
HausdorffLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_1/inductor_cache/uf/cuf2n554zsgrrdxmi7emtplbgjplti2rucbziwhh3mlinlefs6nd.py
# Topologically Sorted Source Nodes: [d2_matrix], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# d2_matrix => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%sqrt, %sqrt_1, %sqrt_2, %sqrt_3],), kwargs = {})
triton_poi_fused_stack_0 = async_compile.triton('triton_poi_fused_stack_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 32, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4*x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (4*x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 - tmp6
tmp8 = tmp7 * tmp7
tmp9 = tl.load(in_ptr0 + (1 + (4*x1)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.load(in_ptr1 + (1 + (4*x0)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp14 = tl.load(in_ptr0 + (2 + (4*x1)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr1 + (2 + (4*x0)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = tl.load(in_ptr0 + (3 + (4*x1)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.load(in_ptr1 + (3 + (4*x0)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tmp19 - tmp20
tmp22 = tmp21 * tmp21
tmp23 = tmp18 + tmp22
tmp24 = libdevice.sqrt(tmp23)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp4, tmp24, tmp25)
tmp27 = tmp0 >= tmp3
tmp28 = tl.full([1], 8, tl.int64)
tmp29 = tmp0 < tmp28
tmp30 = tmp27 & tmp29
tmp31 = tl.load(in_ptr0 + (16 + (4*((-4) + x1))), tmp30 & xmask, eviction_policy='evict_last', other=0.0)
tmp32 = tl.load(in_ptr1 + (16 + (4*x0)), tmp30 & xmask, eviction_policy='evict_last', other=0.0)
tmp33 = tmp31 - tmp32
tmp34 = tmp33 * tmp33
tmp35 = tl.load(in_ptr0 + (17 + (4*((-4) + x1))), tmp30 & xmask, eviction_policy='evict_last', other=0.0)
tmp36 = tl.load(in_ptr1 + (17 + (4*x0)), tmp30 & xmask, eviction_policy='evict_last', other=0.0)
tmp37 = tmp35 - tmp36
tmp38 = tmp37 * tmp37
tmp39 = tmp34 + tmp38
tmp40 = tl.load(in_ptr0 + (18 + (4*((-4) + x1))), tmp30 & xmask, eviction_policy='evict_last', other=0.0)
tmp41 = tl.load(in_ptr1 + (18 + (4*x0)), tmp30 & xmask, eviction_policy='evict_last', other=0.0)
tmp42 = tmp40 - tmp41
tmp43 = tmp42 * tmp42
tmp44 = tmp39 + tmp43
tmp45 = tl.load(in_ptr0 + (19 + (4*((-4) + x1))), tmp30 & xmask, eviction_policy='evict_last', other=0.0)
tmp46 = tl.load(in_ptr1 + (19 + (4*x0)), tmp30 & xmask, eviction_policy='evict_last', other=0.0)
tmp47 = tmp45 - tmp46
tmp48 = tmp47 * tmp47
tmp49 = tmp44 + tmp48
tmp50 = libdevice.sqrt(tmp49)
tmp51 = tl.full(tmp50.shape, 0.0, tmp50.dtype)
tmp52 = tl.where(tmp30, tmp50, tmp51)
tmp53 = tmp0 >= tmp28
tmp54 = tl.full([1], 12, tl.int64)
tmp55 = tmp0 < tmp54
tmp56 = tmp53 & tmp55
tmp57 = tl.load(in_ptr0 + (32 + (4*((-8) + x1))), tmp56 & xmask, eviction_policy='evict_last', other=0.0)
tmp58 = tl.load(in_ptr1 + (32 + (4*x0)), tmp56 & xmask, eviction_policy='evict_last', other=0.0)
tmp59 = tmp57 - tmp58
tmp60 = tmp59 * tmp59
tmp61 = tl.load(in_ptr0 + (33 + (4*((-8) + x1))), tmp56 & xmask, eviction_policy='evict_last', other=0.0)
tmp62 = tl.load(in_ptr1 + (33 + (4*x0)), tmp56 & xmask, eviction_policy='evict_last', other=0.0)
tmp63 = tmp61 - tmp62
tmp64 = tmp63 * tmp63
tmp65 = tmp60 + tmp64
tmp66 = tl.load(in_ptr0 + (34 + (4*((-8) + x1))), tmp56 & xmask, eviction_policy='evict_last', other=0.0)
tmp67 = tl.load(in_ptr1 + (34 + (4*x0)), tmp56 & xmask, eviction_policy='evict_last', other=0.0)
tmp68 = tmp66 - tmp67
tmp69 = tmp68 * tmp68
tmp70 = tmp65 + tmp69
tmp71 = tl.load(in_ptr0 + (35 + (4*((-8) + x1))), tmp56 & xmask, eviction_policy='evict_last', other=0.0)
tmp72 = tl.load(in_ptr1 + (35 + (4*x0)), tmp56 & xmask, eviction_policy='evict_last', other=0.0)
tmp73 = tmp71 - tmp72
tmp74 = tmp73 * tmp73
tmp75 = tmp70 + tmp74
tmp76 = libdevice.sqrt(tmp75)
tmp77 = tl.full(tmp76.shape, 0.0, tmp76.dtype)
tmp78 = tl.where(tmp56, tmp76, tmp77)
tmp79 = tmp0 >= tmp54
tmp80 = tl.full([1], 16, tl.int64)
tmp81 = tmp0 < tmp80
tmp82 = tl.load(in_ptr0 + (48 + (4*((-12) + x1))), tmp79 & xmask, eviction_policy='evict_last', other=0.0)
tmp83 = tl.load(in_ptr1 + (48 + (4*x0)), tmp79 & xmask, eviction_policy='evict_last', other=0.0)
tmp84 = tmp82 - tmp83
tmp85 = tmp84 * tmp84
tmp86 = tl.load(in_ptr0 + (49 + (4*((-12) + x1))), tmp79 & xmask, eviction_policy='evict_last', other=0.0)
tmp87 = tl.load(in_ptr1 + (49 + (4*x0)), tmp79 & xmask, eviction_policy='evict_last', other=0.0)
tmp88 = tmp86 - tmp87
tmp89 = tmp88 * tmp88
tmp90 = tmp85 + tmp89
tmp91 = tl.load(in_ptr0 + (50 + (4*((-12) + x1))), tmp79 & xmask, eviction_policy='evict_last', other=0.0)
tmp92 = tl.load(in_ptr1 + (50 + (4*x0)), tmp79 & xmask, eviction_policy='evict_last', other=0.0)
tmp93 = tmp91 - tmp92
tmp94 = tmp93 * tmp93
tmp95 = tmp90 + tmp94
tmp96 = tl.load(in_ptr0 + (51 + (4*((-12) + x1))), tmp79 & xmask, eviction_policy='evict_last', other=0.0)
tmp97 = tl.load(in_ptr1 + (51 + (4*x0)), tmp79 & xmask, eviction_policy='evict_last', other=0.0)
tmp98 = tmp96 - tmp97
tmp99 = tmp98 * tmp98
tmp100 = tmp95 + tmp99
tmp101 = libdevice.sqrt(tmp100)
tmp102 = tl.full(tmp101.shape, 0.0, tmp101.dtype)
tmp103 = tl.where(tmp79, tmp101, tmp102)
tmp104 = tl.where(tmp56, tmp78, tmp103)
tmp105 = tl.where(tmp30, tmp52, tmp104)
tmp106 = tl.where(tmp4, tmp26, tmp105)
tl.store(out_ptr0 + (x2), tmp106, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_1/inductor_cache/el/celctfu4wbdfgiomns6zpsypz5fnhalcl57xq3mfaseeoxdoxxjb.py
# Topologically Sorted Source Nodes: [min_1, min_2, term_1, term_2, res, mul], Original ATen: [aten.min, aten.mean, aten.add, aten.mul]
# Source node to ATen node mapping:
# min_1 => min_1
# min_2 => min_2
# mul => mul
# res => add
# term_1 => mean
# term_2 => mean_1
# Graph fragment:
# %min_1 : [num_users=1] = call_function[target=torch.ops.aten.min.dim](args = (%view, 2), kwargs = {})
# %min_2 : [num_users=1] = call_function[target=torch.ops.aten.min.dim](args = (%view, 1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%getitem, [-1]), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%getitem_2, [-1]), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, %mean_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 1.0), kwargs = {})
triton_poi_fused_add_mean_min_mul_1 = async_compile.triton('triton_poi_fused_add_mean_min_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_min_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_min_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.minimum(tmp0, tmp1)
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp9 = triton_helpers.minimum(tmp7, tmp8)
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tmp14 = tmp6 + tmp13
tmp17 = triton_helpers.minimum(tmp15, tmp16)
tmp19 = triton_helpers.minimum(tmp17, tmp18)
tmp21 = triton_helpers.minimum(tmp19, tmp20)
tmp22 = tmp14 + tmp21
tmp25 = triton_helpers.minimum(tmp23, tmp24)
tmp27 = triton_helpers.minimum(tmp25, tmp26)
tmp29 = triton_helpers.minimum(tmp27, tmp28)
tmp30 = tmp22 + tmp29
tmp31 = 4.0
tmp32 = tmp30 / tmp31
tmp33 = triton_helpers.minimum(tmp0, tmp7)
tmp34 = triton_helpers.minimum(tmp33, tmp15)
tmp35 = triton_helpers.minimum(tmp34, tmp23)
tmp36 = triton_helpers.minimum(tmp1, tmp8)
tmp37 = triton_helpers.minimum(tmp36, tmp16)
tmp38 = triton_helpers.minimum(tmp37, tmp24)
tmp39 = tmp35 + tmp38
tmp40 = triton_helpers.minimum(tmp3, tmp10)
tmp41 = triton_helpers.minimum(tmp40, tmp18)
tmp42 = triton_helpers.minimum(tmp41, tmp26)
tmp43 = tmp39 + tmp42
tmp44 = triton_helpers.minimum(tmp5, tmp12)
tmp45 = triton_helpers.minimum(tmp44, tmp20)
tmp46 = triton_helpers.minimum(tmp45, tmp28)
tmp47 = tmp43 + tmp46
tmp48 = tmp47 / tmp31
tmp49 = tmp32 + tmp48
tmp50 = 1.0
tmp51 = tmp49 * tmp50
tl.store(in_out_ptr0 + (x0), tmp51, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [d2_matrix], Original ATen: [aten.stack]
stream0 = get_raw_stream(0)
triton_poi_fused_stack_0.run(arg0_1, arg1_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [min_1, min_2, term_1, term_2, res, mul], Original ATen: [aten.min, aten.mean, aten.add, aten.mul]
triton_poi_fused_add_mean_min_mul_1.run(buf3, buf0, 4, grid=grid(4), stream=stream0)
del buf0
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + 4 * x1, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + 4 * x0, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tmp5 - tmp6
tmp8 = tmp7 * tmp7
tmp9 = tl.load(in_ptr0 + (1 + 4 * x1), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp10 = tl.load(in_ptr1 + (1 + 4 * x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp14 = tl.load(in_ptr0 + (2 + 4 * x1), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tl.load(in_ptr1 + (2 + 4 * x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = tl.load(in_ptr0 + (3 + 4 * x1), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp20 = tl.load(in_ptr1 + (3 + 4 * x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp21 = tmp19 - tmp20
tmp22 = tmp21 * tmp21
tmp23 = tmp18 + tmp22
tmp24 = libdevice.sqrt(tmp23)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp4, tmp24, tmp25)
tmp27 = tmp0 >= tmp3
tmp28 = tl.full([1], 8, tl.int64)
tmp29 = tmp0 < tmp28
tmp30 = tmp27 & tmp29
tmp31 = tl.load(in_ptr0 + (16 + 4 * (-4 + x1)), tmp30 & xmask,
eviction_policy='evict_last', other=0.0)
tmp32 = tl.load(in_ptr1 + (16 + 4 * x0), tmp30 & xmask, eviction_policy
='evict_last', other=0.0)
tmp33 = tmp31 - tmp32
tmp34 = tmp33 * tmp33
tmp35 = tl.load(in_ptr0 + (17 + 4 * (-4 + x1)), tmp30 & xmask,
eviction_policy='evict_last', other=0.0)
tmp36 = tl.load(in_ptr1 + (17 + 4 * x0), tmp30 & xmask, eviction_policy
='evict_last', other=0.0)
tmp37 = tmp35 - tmp36
tmp38 = tmp37 * tmp37
tmp39 = tmp34 + tmp38
tmp40 = tl.load(in_ptr0 + (18 + 4 * (-4 + x1)), tmp30 & xmask,
eviction_policy='evict_last', other=0.0)
tmp41 = tl.load(in_ptr1 + (18 + 4 * x0), tmp30 & xmask, eviction_policy
='evict_last', other=0.0)
tmp42 = tmp40 - tmp41
tmp43 = tmp42 * tmp42
tmp44 = tmp39 + tmp43
tmp45 = tl.load(in_ptr0 + (19 + 4 * (-4 + x1)), tmp30 & xmask,
eviction_policy='evict_last', other=0.0)
tmp46 = tl.load(in_ptr1 + (19 + 4 * x0), tmp30 & xmask, eviction_policy
='evict_last', other=0.0)
tmp47 = tmp45 - tmp46
tmp48 = tmp47 * tmp47
tmp49 = tmp44 + tmp48
tmp50 = libdevice.sqrt(tmp49)
tmp51 = tl.full(tmp50.shape, 0.0, tmp50.dtype)
tmp52 = tl.where(tmp30, tmp50, tmp51)
tmp53 = tmp0 >= tmp28
tmp54 = tl.full([1], 12, tl.int64)
tmp55 = tmp0 < tmp54
tmp56 = tmp53 & tmp55
tmp57 = tl.load(in_ptr0 + (32 + 4 * (-8 + x1)), tmp56 & xmask,
eviction_policy='evict_last', other=0.0)
tmp58 = tl.load(in_ptr1 + (32 + 4 * x0), tmp56 & xmask, eviction_policy
='evict_last', other=0.0)
tmp59 = tmp57 - tmp58
tmp60 = tmp59 * tmp59
tmp61 = tl.load(in_ptr0 + (33 + 4 * (-8 + x1)), tmp56 & xmask,
eviction_policy='evict_last', other=0.0)
tmp62 = tl.load(in_ptr1 + (33 + 4 * x0), tmp56 & xmask, eviction_policy
='evict_last', other=0.0)
tmp63 = tmp61 - tmp62
tmp64 = tmp63 * tmp63
tmp65 = tmp60 + tmp64
tmp66 = tl.load(in_ptr0 + (34 + 4 * (-8 + x1)), tmp56 & xmask,
eviction_policy='evict_last', other=0.0)
tmp67 = tl.load(in_ptr1 + (34 + 4 * x0), tmp56 & xmask, eviction_policy
='evict_last', other=0.0)
tmp68 = tmp66 - tmp67
tmp69 = tmp68 * tmp68
tmp70 = tmp65 + tmp69
tmp71 = tl.load(in_ptr0 + (35 + 4 * (-8 + x1)), tmp56 & xmask,
eviction_policy='evict_last', other=0.0)
tmp72 = tl.load(in_ptr1 + (35 + 4 * x0), tmp56 & xmask, eviction_policy
='evict_last', other=0.0)
tmp73 = tmp71 - tmp72
tmp74 = tmp73 * tmp73
tmp75 = tmp70 + tmp74
tmp76 = libdevice.sqrt(tmp75)
tmp77 = tl.full(tmp76.shape, 0.0, tmp76.dtype)
tmp78 = tl.where(tmp56, tmp76, tmp77)
tmp79 = tmp0 >= tmp54
tl.full([1], 16, tl.int64)
tmp82 = tl.load(in_ptr0 + (48 + 4 * (-12 + x1)), tmp79 & xmask,
eviction_policy='evict_last', other=0.0)
tmp83 = tl.load(in_ptr1 + (48 + 4 * x0), tmp79 & xmask, eviction_policy
='evict_last', other=0.0)
tmp84 = tmp82 - tmp83
tmp85 = tmp84 * tmp84
tmp86 = tl.load(in_ptr0 + (49 + 4 * (-12 + x1)), tmp79 & xmask,
eviction_policy='evict_last', other=0.0)
tmp87 = tl.load(in_ptr1 + (49 + 4 * x0), tmp79 & xmask, eviction_policy
='evict_last', other=0.0)
tmp88 = tmp86 - tmp87
tmp89 = tmp88 * tmp88
tmp90 = tmp85 + tmp89
tmp91 = tl.load(in_ptr0 + (50 + 4 * (-12 + x1)), tmp79 & xmask,
eviction_policy='evict_last', other=0.0)
tmp92 = tl.load(in_ptr1 + (50 + 4 * x0), tmp79 & xmask, eviction_policy
='evict_last', other=0.0)
tmp93 = tmp91 - tmp92
tmp94 = tmp93 * tmp93
tmp95 = tmp90 + tmp94
tmp96 = tl.load(in_ptr0 + (51 + 4 * (-12 + x1)), tmp79 & xmask,
eviction_policy='evict_last', other=0.0)
tmp97 = tl.load(in_ptr1 + (51 + 4 * x0), tmp79 & xmask, eviction_policy
='evict_last', other=0.0)
tmp98 = tmp96 - tmp97
tmp99 = tmp98 * tmp98
tmp100 = tmp95 + tmp99
tmp101 = libdevice.sqrt(tmp100)
tmp102 = tl.full(tmp101.shape, 0.0, tmp101.dtype)
tmp103 = tl.where(tmp79, tmp101, tmp102)
tmp104 = tl.where(tmp56, tmp78, tmp103)
tmp105 = tl.where(tmp30, tmp52, tmp104)
tmp106 = tl.where(tmp4, tmp26, tmp105)
tl.store(out_ptr0 + x2, tmp106, xmask)
@triton.jit
def triton_poi_fused_add_mean_min_mul_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp10 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp24 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp26 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = triton_helpers.minimum(tmp0, tmp1)
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp9 = triton_helpers.minimum(tmp7, tmp8)
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tmp14 = tmp6 + tmp13
tmp17 = triton_helpers.minimum(tmp15, tmp16)
tmp19 = triton_helpers.minimum(tmp17, tmp18)
tmp21 = triton_helpers.minimum(tmp19, tmp20)
tmp22 = tmp14 + tmp21
tmp25 = triton_helpers.minimum(tmp23, tmp24)
tmp27 = triton_helpers.minimum(tmp25, tmp26)
tmp29 = triton_helpers.minimum(tmp27, tmp28)
tmp30 = tmp22 + tmp29
tmp31 = 4.0
tmp32 = tmp30 / tmp31
tmp33 = triton_helpers.minimum(tmp0, tmp7)
tmp34 = triton_helpers.minimum(tmp33, tmp15)
tmp35 = triton_helpers.minimum(tmp34, tmp23)
tmp36 = triton_helpers.minimum(tmp1, tmp8)
tmp37 = triton_helpers.minimum(tmp36, tmp16)
tmp38 = triton_helpers.minimum(tmp37, tmp24)
tmp39 = tmp35 + tmp38
tmp40 = triton_helpers.minimum(tmp3, tmp10)
tmp41 = triton_helpers.minimum(tmp40, tmp18)
tmp42 = triton_helpers.minimum(tmp41, tmp26)
tmp43 = tmp39 + tmp42
tmp44 = triton_helpers.minimum(tmp5, tmp12)
tmp45 = triton_helpers.minimum(tmp44, tmp20)
tmp46 = triton_helpers.minimum(tmp45, tmp28)
tmp47 = tmp43 + tmp46
tmp48 = tmp47 / tmp31
tmp49 = tmp32 + tmp48
tmp50 = 1.0
tmp51 = tmp49 * tmp50
tl.store(in_out_ptr0 + x0, tmp51, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_stack_0[grid(64)](arg0_1, arg1_1, buf0, 64, XBLOCK
=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
buf3 = buf1
del buf1
triton_poi_fused_add_mean_min_mul_1[grid(4)](buf3, buf0, 4, XBLOCK=
4, num_warps=1, num_stages=1)
del buf0
return buf3,
class HausdorffLossNew(nn.Module):
def __init__(self, loss_weight=1.0):
super(HausdorffLossNew, self).__init__()
self.weight = loss_weight
def cdist(self, x, y):
"""
Compute distance between each pair of the two collections of inputs.
:param x: Nxd Tensor
:param y: Mxd Tensor
        :return: NxM matrix where dist[i,j] is the norm between x[i,:] and y[j,:],
        i.e. dist[i,j] = ||x[i,:]-y[j,:]||
"""
differences = x.unsqueeze(1) - y.unsqueeze(0)
distances = torch.sum(differences ** 2, -1).sqrt()
return distances
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
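A hedged eager-mode reference for what the two fused kernels above compute per batch element, assuming (B, N, d) inputs as in the benchmark shapes; the function name is an illustrative assumption.

import torch

def hausdorff_reference(x: torch.Tensor, y: torch.Tensor, weight: float = 1.0) -> torch.Tensor:
    # x, y: (B, N, d); pairwise Euclidean distances per batch element
    d = torch.cdist(x, y)                      # (B, N, N), matches the stacked sqrt-of-squares kernel
    term_1 = d.min(dim=2).values.mean(dim=-1)  # mean over x of the nearest-y distance
    term_2 = d.min(dim=1).values.mean(dim=-1)  # mean over y of the nearest-x distance
    return weight * (term_1 + term_2)          # shape (B,), as returned by call()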
|
LiWentomng/boxlevelset
|
HausdorffLoss
| false
| 8,477
|
[
"Apache-2.0"
] | 25
|
8cc40bf6ae4a343c482c676c72259cc12c29d31c
|
https://github.com/LiWentomng/boxlevelset/tree/8cc40bf6ae4a343c482c676c72259cc12c29d31c
|
ScaleUp
|
import torch
import torch.nn as nn
from torch.nn import Parameter
class ScaleUp(nn.Module):
"""ScaleUp"""
def __init__(self, scale):
super(ScaleUp, self).__init__()
self.scale = Parameter(torch.tensor(scale))
def forward(self, x):
return x * self.scale
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'scale': 1.0}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](primals_2, primals_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
return buf0, primals_2
class ScaleUpNew(nn.Module):
"""ScaleUp"""
def __init__(self, scale):
super(ScaleUpNew, self).__init__()
self.scale = Parameter(torch.tensor(scale))
def forward(self, input_0):
primals_1 = self.scale
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
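A one-line usage sketch for the compiled module above; the shape mirrors the benchmark inputs and is illustrative.

import torch

scale_up = ScaleUpNew(scale=1.0).cuda()
out = scale_up(torch.rand(4, 4, 4, 4, device='cuda'))   # element-wise x * scale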
|
NTDXYG/DeepPseudo
|
ScaleUp
| false
| 17,724
|
[
"Apache-2.0"
] | 7
|
0d89045ea145f23259306eb024e9bbe261f33d9b
|
https://github.com/NTDXYG/DeepPseudo/tree/0d89045ea145f23259306eb024e9bbe261f33d9b
|
LSTM
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_1/inductor_cache/zb/czbbllwh7sfg4givymrrkjxzw23nmw5ismoyqt4debm22krcdkqc.py
# Topologically Sorted Source Nodes: [h_0], Original ATen: [aten.zero]
# Source node to ATen node mapping:
# h_0 => full
# Graph fragment:
# %full : [num_users=3] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
triton_poi_fused_zero_0 = async_compile.triton('triton_poi_fused_zero_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_zero_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_zero_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_1/inductor_cache/gu/cgumvpbq5erg3rnqxec3fd4jo7ptcsj3kczlww57elnxgqgsibht.py
# Topologically Sorted Source Nodes: [sigmoid, mul, sigmoid_1, tanh, mul_1, c_1, sigmoid_2, tanh_1, h_1], Original ATen: [aten.sigmoid, aten.mul, aten.tanh, aten.add, aten.sigmoid_backward]
# Source node to ATen node mapping:
# c_1 => add_1
# h_1 => mul_2
# mul => mul
# mul_1 => mul_1
# sigmoid => sigmoid
# sigmoid_1 => sigmoid_1
# sigmoid_2 => sigmoid_2
# tanh => tanh
# tanh_1 => tanh_1
# Graph fragment:
# %sigmoid : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %full), kwargs = {})
# %sigmoid_1 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_1,), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%getitem_3,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %tanh), kwargs = {})
# %add_1 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %sigmoid_2 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_2,), kwargs = {})
# %tanh_1 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_1,), kwargs = {})
# %mul_2 : [num_users=3] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_2, %tanh_1), kwargs = {})
# %sub_19 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %mul_73 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %sub_19), kwargs = {})
triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (4 + x0 + (16*x1)), xmask)
tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp7 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (12 + x0 + (16*x1)), xmask)
tmp12 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp13 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (x0 + (16*x1)), xmask)
tmp25 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp26 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + (8 + x0 + (16*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = libdevice.tanh(tmp10)
tmp14 = tmp12 + tmp13
tmp16 = tmp14 + tmp15
tmp17 = tl.sigmoid(tmp16)
tmp18 = 0.0
tmp19 = tmp17 * tmp18
tmp20 = tmp5 * tmp11
tmp21 = tmp19 + tmp20
tmp22 = 1.0
tmp23 = tmp22 - tmp17
tmp24 = tmp17 * tmp23
tmp27 = tmp25 + tmp26
tmp29 = tmp27 + tmp28
tmp30 = tl.sigmoid(tmp29)
tmp31 = libdevice.tanh(tmp21)
tmp32 = tmp30 * tmp31
tl.store(out_ptr0 + (x2), tmp5, xmask)
tl.store(out_ptr1 + (x2), tmp11, xmask)
tl.store(out_ptr2 + (x2), tmp21, xmask)
tl.store(out_ptr3 + (x2), tmp24, xmask)
tl.store(out_ptr4 + (x2), tmp30, xmask)
tl.store(out_ptr5 + (x2), tmp32, xmask)
''', device_str='cuda')
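# For reference, a minimal eager-mode sketch of the single LSTM step that the fused
# kernel above computes (gate order is inferred from the 0/4/8/12 chunk offsets; the
# helper and argument names are illustrative, not part of the generated module):
def _reference_lstm_step(gates, c_prev):
    # gates: [batch, 4 * hidden] pre-activations = recurrent matmul + bias + input matmul
    f, i, o, g = gates.chunk(4, dim=1)
    c_new = torch.sigmoid(f) * c_prev + torch.sigmoid(i) * torch.tanh(g)
    h_new = torch.sigmoid(o) * torch.tanh(c_new)
    return h_new, c_new
# The kernel additionally stores sigmoid(f) * (1 - sigmoid(f)) so the backward pass
# can reuse the sigmoid derivative instead of recomputing it.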
# kernel path: runs/run_shard_1/inductor_cache/gq/cgqwwez7hgnau2ufccutodv4pz77aowb3qzpfc3ym7ivb7snhuqp.py
# Topologically Sorted Source Nodes: [sigmoid_3, mul_3, sigmoid_4, tanh_2, mul_4, c_2, sigmoid_5, tanh_3, h_2], Original ATen: [aten.sigmoid, aten.mul, aten.tanh, aten.add]
# Source node to ATen node mapping:
# c_2 => add_3
# h_2 => mul_5
# mul_3 => mul_3
# mul_4 => mul_4
# sigmoid_3 => sigmoid_3
# sigmoid_4 => sigmoid_4
# sigmoid_5 => sigmoid_5
# tanh_2 => tanh_2
# tanh_3 => tanh_3
# Graph fragment:
# %sigmoid_3 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_4,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_3, %add_1), kwargs = {})
# %sigmoid_4 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_5,), kwargs = {})
# %tanh_2 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%getitem_7,), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_4, %tanh_2), kwargs = {})
# %add_3 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %mul_4), kwargs = {})
# %sigmoid_5 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_6,), kwargs = {})
# %tanh_3 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_3,), kwargs = {})
# %mul_5 : [num_users=3] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_5, %tanh_3), kwargs = {})
triton_poi_fused_add_mul_sigmoid_tanh_2 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_tanh_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_tanh_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 13, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_tanh_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + (16*x1)), xmask)
tmp6 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp7 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (4 + x0 + (16*x1)), xmask)
tmp12 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp13 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (12 + x0 + (16*x1)), xmask)
tmp18 = tl.load(in_ptr3 + (x2), xmask)
tmp22 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp23 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr2 + (8 + x0 + (16*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = tl.sigmoid(tmp10)
tmp14 = tmp12 + tmp13
tmp16 = tmp14 + tmp15
tmp17 = libdevice.tanh(tmp16)
tmp19 = tmp5 * tmp18
tmp20 = tmp11 * tmp17
tmp21 = tmp19 + tmp20
tmp24 = tmp22 + tmp23
tmp26 = tmp24 + tmp25
tmp27 = tl.sigmoid(tmp26)
tmp28 = libdevice.tanh(tmp21)
tmp29 = tmp27 * tmp28
tl.store(out_ptr0 + (x2), tmp5, xmask)
tl.store(out_ptr1 + (x2), tmp11, xmask)
tl.store(out_ptr2 + (x2), tmp17, xmask)
tl.store(out_ptr3 + (x2), tmp21, xmask)
tl.store(out_ptr4 + (x2), tmp27, xmask)
tl.store(out_ptr5 + (x2), tmp29, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_1/inductor_cache/p4/cp4hc27y5z2efbcvfuh3f2ytub6r2x3mg2tag432n5zqm32vjxl6.py
# Topologically Sorted Source Nodes: [sigmoid_9, mul_9, sigmoid_10, tanh_6, mul_10, c_4, tanh_7], Original ATen: [aten.sigmoid, aten.mul, aten.tanh, aten.add]
# Source node to ATen node mapping:
# c_4 => add_7
# mul_10 => mul_10
# mul_9 => mul_9
# sigmoid_10 => sigmoid_10
# sigmoid_9 => sigmoid_9
# tanh_6 => tanh_6
# tanh_7 => tanh_7
# Graph fragment:
# %sigmoid_9 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_12,), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_9, %add_5), kwargs = {})
# %sigmoid_10 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_13,), kwargs = {})
# %tanh_6 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%getitem_15,), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_10, %tanh_6), kwargs = {})
# %add_7 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_9, %mul_10), kwargs = {})
# %tanh_7 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_7,), kwargs = {})
triton_poi_fused_add_mul_sigmoid_tanh_3 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_tanh_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_tanh_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_tanh_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + (16*x1)), xmask)
tmp6 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp7 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (4 + x0 + (16*x1)), xmask)
tmp12 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp13 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (12 + x0 + (16*x1)), xmask)
tmp18 = tl.load(in_ptr3 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = tl.sigmoid(tmp10)
tmp14 = tmp12 + tmp13
tmp16 = tmp14 + tmp15
tmp17 = libdevice.tanh(tmp16)
tmp19 = tmp5 * tmp18
tmp20 = tmp11 * tmp17
tmp21 = tmp19 + tmp20
tmp22 = libdevice.tanh(tmp21)
tl.store(out_ptr0 + (x2), tmp5, xmask)
tl.store(out_ptr1 + (x2), tmp11, xmask)
tl.store(out_ptr2 + (x2), tmp17, xmask)
tl.store(out_ptr3 + (x2), tmp22, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_1/inductor_cache/7n/c7ndokn6ji4llxnabzyjlads6rh4x5ey476tt2px3zc4d23m2qgt.py
# Topologically Sorted Source Nodes: [sigmoid_11], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# sigmoid_11 => sigmoid_11
# Graph fragment:
# %sigmoid_11 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_14,), kwargs = {})
triton_poi_fused_sigmoid_4 = async_compile.triton('triton_poi_fused_sigmoid_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (8 + x0 + (16*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tl.store(out_ptr0 + (x2), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_1/inductor_cache/yv/cyv52nqg474urnhaqatxvbklhih33ignejyc4cwxxhp2jytcbskj.py
# Topologically Sorted Source Nodes: [c_n], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# c_n => cat_1
# Graph fragment:
# %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%add_1, %add_3, %add_5, %add_7],), kwargs = {})
triton_poi_fused_stack_5 = async_compile.triton('triton_poi_fused_stack_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4*x1)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + (4*((-4) + x1))), tmp9 & xmask, other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x0 + (4*((-8) + x1))), tmp14 & xmask, other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 16, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tl.load(in_ptr3 + (x0 + (4*((-12) + x1))), tmp16 & xmask, other=0.0)
tmp20 = tl.load(in_ptr2 + (x0 + (4*((-12) + x1))), tmp16 & xmask, other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tl.load(in_ptr4 + (x0 + (4*((-12) + x1))), tmp16 & xmask, other=0.0)
tmp23 = tl.load(in_ptr5 + (x0 + (4*((-12) + x1))), tmp16 & xmask, other=0.0)
tmp24 = tmp22 * tmp23
tmp25 = tmp21 + tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp16, tmp25, tmp26)
tmp28 = tl.where(tmp14, tmp15, tmp27)
tmp29 = tl.where(tmp9, tmp10, tmp28)
tmp30 = tl.where(tmp4, tmp5, tmp29)
tl.store(out_ptr0 + (x2), tmp30, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_1/inductor_cache/ai/cai7gbb6ayhln7jiow4pkqlopuq3kycdei7elu6sopif4qdkhilf.py
# Topologically Sorted Source Nodes: [h_n], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# h_n => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mul_2, %mul_5, %mul_8, %mul_11],), kwargs = {})
triton_poi_fused_stack_6 = async_compile.triton('triton_poi_fused_stack_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4*x1)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + (4*((-4) + x1))), tmp9 & xmask, other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x0 + (4*((-8) + x1))), tmp14 & xmask, other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 16, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tl.load(in_ptr3 + (x0 + (4*((-12) + x1))), tmp16 & xmask, other=0.0)
tmp20 = tl.load(in_ptr4 + (x0 + (4*((-12) + x1))), tmp16 & xmask, other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp16, tmp21, tmp22)
tmp24 = tl.where(tmp14, tmp15, tmp23)
tmp25 = tl.where(tmp9, tmp10, tmp24)
tmp26 = tl.where(tmp4, tmp5, tmp25)
tl.store(out_ptr0 + (x2), tmp26, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, ), (1, ))
assert_size_stride(primals_3, (4, 16), (16, 1))
assert_size_stride(primals_4, (4, 16), (16, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_0], Original ATen: [aten.zero]
stream0 = get_raw_stream(0)
triton_poi_fused_zero_0.run(buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf0, primals_3, out=buf1)
buf2 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [mm], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 0), primals_4, out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf33 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid, mul, sigmoid_1, tanh, mul_1, c_1, sigmoid_2, tanh_1, h_1], Original ATen: [aten.sigmoid, aten.mul, aten.tanh, aten.add, aten.sigmoid_backward]
triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1.run(buf1, primals_2, buf2, buf3, buf4, buf5, buf33, buf6, buf7, 16, grid=grid(16), stream=stream0)
buf8 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf7, primals_3, out=buf8)
buf9 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [mm_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 4), primals_4, out=buf9)
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid_3, mul_3, sigmoid_4, tanh_2, mul_4, c_2, sigmoid_5, tanh_3, h_2], Original ATen: [aten.sigmoid, aten.mul, aten.tanh, aten.add]
triton_poi_fused_add_mul_sigmoid_tanh_2.run(buf8, primals_2, buf9, buf5, buf10, buf11, buf12, buf13, buf14, buf15, 16, grid=grid(16), stream=stream0)
buf16 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf15, primals_3, out=buf16)
buf17 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [mm_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 8), primals_4, out=buf17)
buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf20 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf21 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf22 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf23 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid_6, mul_6, sigmoid_7, tanh_4, mul_7, c_3, sigmoid_8, tanh_5, h_3], Original ATen: [aten.sigmoid, aten.mul, aten.tanh, aten.add]
triton_poi_fused_add_mul_sigmoid_tanh_2.run(buf16, primals_2, buf17, buf13, buf18, buf19, buf20, buf21, buf22, buf23, 16, grid=grid(16), stream=stream0)
buf24 = buf17; del buf17 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf23, primals_3, out=buf24)
buf25 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [mm_3], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 12), primals_4, out=buf25)
del primals_4
buf26 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf27 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf28 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf30 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid_9, mul_9, sigmoid_10, tanh_6, mul_10, c_4, tanh_7], Original ATen: [aten.sigmoid, aten.mul, aten.tanh, aten.add]
triton_poi_fused_add_mul_sigmoid_tanh_3.run(buf24, primals_2, buf25, buf21, buf26, buf27, buf28, buf30, 16, grid=grid(16), stream=stream0)
buf29 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid_11], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_4.run(buf24, primals_2, buf25, buf29, 16, grid=grid(16), stream=stream0)
del primals_2
buf31 = reinterpret_tensor(buf25, (16, 4), (4, 1), 0); del buf25 # reuse
# Topologically Sorted Source Nodes: [c_n], Original ATen: [aten.stack]
triton_poi_fused_stack_5.run(buf5, buf13, buf21, buf26, buf27, buf28, buf31, 64, grid=grid(64), stream=stream0)
buf32 = reinterpret_tensor(buf24, (16, 4), (4, 1), 0); del buf24 # reuse
# Topologically Sorted Source Nodes: [h_n], Original ATen: [aten.stack]
triton_poi_fused_stack_6.run(buf7, buf15, buf23, buf29, buf30, buf32, 64, grid=grid(64), stream=stream0)
return (reinterpret_tensor(buf32, (4, 4, 4), (4, 16, 1), 0), reinterpret_tensor(buf31, (4, 4, 4), (4, 16, 1), 0), buf0, buf3, buf4, buf5, buf6, buf10, buf11, buf12, buf13, buf14, buf18, buf19, buf20, buf21, buf22, buf26, buf27, buf28, buf29, buf30, reinterpret_tensor(primals_1, (4, 4), (1, 16), 12), reinterpret_tensor(primals_3, (16, 4), (1, 16), 0), reinterpret_tensor(buf23, (4, 4), (1, 4), 0), reinterpret_tensor(primals_1, (4, 4), (1, 16), 8), reinterpret_tensor(buf15, (4, 4), (1, 4), 0), reinterpret_tensor(primals_1, (4, 4), (1, 16), 4), reinterpret_tensor(buf7, (4, 4), (1, 4), 0), buf33, reinterpret_tensor(primals_1, (4, 4), (1, 16), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
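# Fills the initial hidden state h_0 with zeros; the initial cell state is never
# materialized because the first gate kernel folds it in as a 0.0 constant.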
@triton.jit
def triton_poi_fused_zero_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
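# Fused gate math for the first time step: the pre-activations are the recurrent
# projection (in_ptr0) + bias (in_ptr1) + input projection (in_ptr2), split into four
# hidden_size-wide chunks. It emits the gate activations, the new cell state
# c_1 = sigmoid(chunk0) * c_0 + sigmoid(chunk1) * tanh(chunk3) with c_0 = 0,
# h_1 = sigmoid(chunk2) * tanh(c_1), and sigmoid(chunk0) * (1 - sigmoid(chunk0))
# for the backward pass.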
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4,
out_ptr5, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (4 + x0 + 16 * x1), xmask)
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp7 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (12 + x0 + 16 * x1), xmask)
tmp12 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp13 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (x0 + 16 * x1), xmask)
tmp25 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp26 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + (8 + x0 + 16 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = libdevice.tanh(tmp10)
tmp14 = tmp12 + tmp13
tmp16 = tmp14 + tmp15
tmp17 = tl.sigmoid(tmp16)
tmp18 = 0.0
tmp19 = tmp17 * tmp18
tmp20 = tmp5 * tmp11
tmp21 = tmp19 + tmp20
tmp22 = 1.0
tmp23 = tmp22 - tmp17
tmp24 = tmp17 * tmp23
tmp27 = tmp25 + tmp26
tmp29 = tmp27 + tmp28
tmp30 = tl.sigmoid(tmp29)
tmp31 = libdevice.tanh(tmp21)
tmp32 = tmp30 * tmp31
tl.store(out_ptr0 + x2, tmp5, xmask)
tl.store(out_ptr1 + x2, tmp11, xmask)
tl.store(out_ptr2 + x2, tmp21, xmask)
tl.store(out_ptr3 + x2, tmp24, xmask)
tl.store(out_ptr4 + x2, tmp30, xmask)
tl.store(out_ptr5 + x2, tmp32, xmask)
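# Fused gate math for the intermediate time steps: identical to the kernel above,
# except the previous cell state is read from in_ptr3 instead of being the constant 0.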
@triton.jit
def triton_poi_fused_add_mul_sigmoid_tanh_2(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + 16 * x1), xmask)
tmp6 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp7 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (4 + x0 + 16 * x1), xmask)
tmp12 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp13 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (12 + x0 + 16 * x1), xmask)
tmp18 = tl.load(in_ptr3 + x2, xmask)
tmp22 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp23 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr2 + (8 + x0 + 16 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = tl.sigmoid(tmp10)
tmp14 = tmp12 + tmp13
tmp16 = tmp14 + tmp15
tmp17 = libdevice.tanh(tmp16)
tmp19 = tmp5 * tmp18
tmp20 = tmp11 * tmp17
tmp21 = tmp19 + tmp20
tmp24 = tmp22 + tmp23
tmp26 = tmp24 + tmp25
tmp27 = tl.sigmoid(tmp26)
tmp28 = libdevice.tanh(tmp21)
tmp29 = tmp27 * tmp28
tl.store(out_ptr0 + x2, tmp5, xmask)
tl.store(out_ptr1 + x2, tmp11, xmask)
tl.store(out_ptr2 + x2, tmp17, xmask)
tl.store(out_ptr3 + x2, tmp21, xmask)
tl.store(out_ptr4 + x2, tmp27, xmask)
tl.store(out_ptr5 + x2, tmp29, xmask)
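# Last-step variant: computes the gate activations and tanh(c_4) but leaves the
# output gate and the final h_4 = o * tanh(c_4) multiply to the two kernels below.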
@triton.jit
def triton_poi_fused_add_mul_sigmoid_tanh_3(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + 16 * x1), xmask)
tmp6 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp7 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (4 + x0 + 16 * x1), xmask)
tmp12 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp13 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (12 + x0 + 16 * x1), xmask)
tmp18 = tl.load(in_ptr3 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = tl.sigmoid(tmp10)
tmp14 = tmp12 + tmp13
tmp16 = tmp14 + tmp15
tmp17 = libdevice.tanh(tmp16)
tmp19 = tmp5 * tmp18
tmp20 = tmp11 * tmp17
tmp21 = tmp19 + tmp20
tmp22 = libdevice.tanh(tmp21)
tl.store(out_ptr0 + x2, tmp5, xmask)
tl.store(out_ptr1 + x2, tmp11, xmask)
tl.store(out_ptr2 + x2, tmp17, xmask)
tl.store(out_ptr3 + x2, tmp22, xmask)
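# Output gate for the last time step: sigmoid of the third gate chunk.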
@triton.jit
def triton_poi_fused_sigmoid_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (8 + x0 + 16 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tl.store(out_ptr0 + x2, tmp5, xmask)
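# Stacks the per-step cell states into c_n; the last cell state is recomputed here
# from the saved activations as forget * c_3 + input * candidate.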
@triton.jit
def triton_poi_fused_stack_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 4 * (-4 + x1)), tmp9 & xmask, other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x0 + 4 * (-8 + x1)), tmp14 & xmask, other=0.0)
tmp16 = tmp0 >= tmp12
tl.full([1], 16, tl.int64)
tmp19 = tl.load(in_ptr3 + (x0 + 4 * (-12 + x1)), tmp16 & xmask, other=0.0)
tmp20 = tl.load(in_ptr2 + (x0 + 4 * (-12 + x1)), tmp16 & xmask, other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tl.load(in_ptr4 + (x0 + 4 * (-12 + x1)), tmp16 & xmask, other=0.0)
tmp23 = tl.load(in_ptr5 + (x0 + 4 * (-12 + x1)), tmp16 & xmask, other=0.0)
tmp24 = tmp22 * tmp23
tmp25 = tmp21 + tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp16, tmp25, tmp26)
tmp28 = tl.where(tmp14, tmp15, tmp27)
tmp29 = tl.where(tmp9, tmp10, tmp28)
tmp30 = tl.where(tmp4, tmp5, tmp29)
tl.store(out_ptr0 + x2, tmp30, xmask)
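# Stacks the per-step hidden states into h_n; the last hidden state is formed here
# as output_gate * tanh(c_4).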
@triton.jit
def triton_poi_fused_stack_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 4 * (-4 + x1)), tmp9 & xmask, other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x0 + 4 * (-8 + x1)), tmp14 & xmask, other=0.0)
tmp16 = tmp0 >= tmp12
tl.full([1], 16, tl.int64)
tmp19 = tl.load(in_ptr3 + (x0 + 4 * (-12 + x1)), tmp16 & xmask, other=0.0)
tmp20 = tl.load(in_ptr4 + (x0 + 4 * (-12 + x1)), tmp16 & xmask, other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp16, tmp21, tmp22)
tmp24 = tl.where(tmp14, tmp15, tmp23)
tmp25 = tl.where(tmp9, tmp10, tmp24)
tmp26 = tl.where(tmp4, tmp5, tmp25)
tl.store(out_ptr0 + x2, tmp26, xmask)
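# Unrolled forward over the four time steps: each step runs one matmul projecting the
# previous hidden state, one matmul projecting the current input slice, and one fused
# gate kernel; the per-step states are then stacked into (h_n, c_n) and the tensors
# needed for the backward pass are returned alongside them.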
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 16), (16, 1))
assert_size_stride(primals_4, (4, 16), (16, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_zero_0[grid(16)](buf0, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.mm(buf0, primals_3, out=buf1)
buf2 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 0),
primals_4, out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf33 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1[grid(16)](buf1
, primals_2, buf2, buf3, buf4, buf5, buf33, buf6, buf7, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf8 = buf2
del buf2
extern_kernels.mm(buf7, primals_3, out=buf8)
buf9 = buf1
del buf1
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 4),
primals_4, out=buf9)
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_sigmoid_tanh_2[grid(16)](buf8, primals_2,
buf9, buf5, buf10, buf11, buf12, buf13, buf14, buf15, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf16 = buf9
del buf9
extern_kernels.mm(buf15, primals_3, out=buf16)
buf17 = buf8
del buf8
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 8),
primals_4, out=buf17)
buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf20 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf21 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf22 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf23 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_sigmoid_tanh_2[grid(16)](buf16, primals_2,
buf17, buf13, buf18, buf19, buf20, buf21, buf22, buf23, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf24 = buf17
del buf17
extern_kernels.mm(buf23, primals_3, out=buf24)
buf25 = buf16
del buf16
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 12
), primals_4, out=buf25)
del primals_4
buf26 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf27 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf28 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf30 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_sigmoid_tanh_3[grid(16)](buf24, primals_2,
buf25, buf21, buf26, buf27, buf28, buf30, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf29 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_sigmoid_4[grid(16)](buf24, primals_2, buf25, buf29,
16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
buf31 = reinterpret_tensor(buf25, (16, 4), (4, 1), 0)
del buf25
triton_poi_fused_stack_5[grid(64)](buf5, buf13, buf21, buf26, buf27,
buf28, buf31, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf32 = reinterpret_tensor(buf24, (16, 4), (4, 1), 0)
del buf24
triton_poi_fused_stack_6[grid(64)](buf7, buf15, buf23, buf29, buf30,
buf32, 64, XBLOCK=64, num_warps=1, num_stages=1)
return (reinterpret_tensor(buf32, (4, 4, 4), (4, 16, 1), 0),
reinterpret_tensor(buf31, (4, 4, 4), (4, 16, 1), 0), buf0, buf3,
buf4, buf5, buf6, buf10, buf11, buf12, buf13, buf14, buf18, buf19,
buf20, buf21, buf22, buf26, buf27, buf28, buf29, buf30,
reinterpret_tensor(primals_1, (4, 4), (1, 16), 12),
reinterpret_tensor(primals_3, (16, 4), (1, 16), 0),
reinterpret_tensor(buf23, (4, 4), (1, 4), 0), reinterpret_tensor(
primals_1, (4, 4), (1, 16), 8), reinterpret_tensor(buf15, (4, 4), (
1, 4), 0), reinterpret_tensor(primals_1, (4, 4), (1, 16), 4),
reinterpret_tensor(buf7, (4, 4), (1, 4), 0), buf33,
reinterpret_tensor(primals_1, (4, 4), (1, 16), 0))
class LSTMNew(nn.Module):
"""Implementation of the standard LSTM.
TODO: Include ref and LaTeX equations
Parameters
----------
input_size : int
Number of input features
hidden_size : int
Number of hidden/memory cells.
batch_first : bool, optional
        If True, expects the batch inputs to be of shape [batch, seq, features]; otherwise,
        the shape has to be [seq, batch, features]. By default True.
initial_forget_bias : int, optional
Value of the initial forget gate bias, by default 0
"""
def __init__(self, input_size: 'int', hidden_size: 'int', batch_first:
'bool'=True, initial_forget_bias: 'int'=0):
super(LSTMNew, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.batch_first = batch_first
self.initial_forget_bias = initial_forget_bias
self.weight_ih = nn.Parameter(torch.FloatTensor(input_size, 4 *
hidden_size))
self.weight_hh = nn.Parameter(torch.FloatTensor(hidden_size, 4 *
hidden_size))
self.bias = nn.Parameter(torch.FloatTensor(4 * hidden_size))
self.reset_parameters()
def reset_parameters(self):
"""Initialize all learnable parameters of the LSTM"""
nn.init.orthogonal_(self.weight_ih.data)
weight_hh_data = torch.eye(self.hidden_size)
weight_hh_data = weight_hh_data.repeat(1, 4)
self.weight_hh.data = weight_hh_data
nn.init.constant_(self.bias.data, val=0)
if self.initial_forget_bias != 0:
self.bias.data[:self.hidden_size] = self.initial_forget_bias
def forward(self, input_0):
primals_3 = self.weight_ih
primals_4 = self.weight_hh
primals_2 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0], output[1]
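# A minimal usage sketch for the module above (the 4/4 sizes mirror the test
# configuration exercised by call(); the helper name, device choice and input shape
# are illustrative assumptions, not part of the source repository):
def _example_usage_lstm():
    model = LSTMNew(input_size=4, hidden_size=4).cuda()
    x = torch.rand(4, 4, 4, device='cuda')  # [batch, seq, features] with batch_first=True
    h_n, c_n = model(x)  # stacked per-step hidden and cell states
    return h_n.shape, c_n.shape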
|
repo_name: Flash-Of-Thunder/testing
module_name: LSTM
synthetic: false
uuid: 8,159
licenses: ["Apache-2.0"]
stars: 18
sha: 36366e2cd32756fb07abc533ecbb7672a4738bc6
repo_link: https://github.com/Flash-Of-Thunder/testing/tree/36366e2cd32756fb07abc533ecbb7672a4738bc6
|
entry_point: ECA
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# mean => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [-1]), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/a4/ca4pk5bet2ap5zhlcm7x5qhkcykaonjnkexgc274irmy5c3x7nr6.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expand, %primals_1), kwargs = {})
triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (x2), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
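# For reference, an eager-mode sketch of what the two kernels above plus the extern
# conv1d compute (the helper name and the use of F.conv1d are illustrative; padding=1
# assumes the default k_size of 3):
def _reference_eca(x, conv_weight):
    import torch.nn.functional as F
    b, c = x.shape[:2]
    y = x.view(b, c, -1).mean(-1)                           # triton_per_fused_mean_0
    w = F.conv1d(y.view(b, 1, c), conv_weight, padding=1)   # extern convolution
    return torch.sigmoid(w).view(b, c, 1, 1) * x            # triton_poi_fused_mul_1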
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 1, 3), (3, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (4, 1, 4), (4, 0, 1), 0), primals_2, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 1, 4), (4, 4, 1))
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
triton_poi_fused_mul_1.run(buf2, primals_1, buf3, 256, grid=grid(256), stream=stream0)
return (buf3, primals_1, primals_2, reinterpret_tensor(buf1, (4, 1, 4), (4, 1, 1), 0), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 1, 3), (3, 3, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
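# Global average pool: reduces the 16 spatial positions of each (batch, channel) pair
# to a single mean value.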
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
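# Channel attention: applies sigmoid to the per-channel conv output and scales every
# spatial position of the corresponding input channel by it.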
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + x2, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
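# Pipeline: spatial mean pool -> 1D convolution across the channel dimension
# (extern kernel) -> sigmoid gating of the original feature map.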
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 1, 3), (3, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (4, 1, 4
), (4, 0, 1), 0), primals_2, stride=(1,), padding=(1,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf2, (4, 1, 4), (4, 4, 1))
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_1[grid(256)](buf2, primals_1, buf3, 256,
XBLOCK=128, num_warps=4, num_stages=1)
return buf3, primals_1, primals_2, reinterpret_tensor(buf1, (4, 1, 4),
(4, 1, 1), 0), buf2
class FastGlobalAvgPool2d:
def __init__(self, flatten=False):
self.flatten = flatten
def __call__(self, x):
if self.flatten:
in_size = x.size()
return x.view((in_size[0], in_size[1], -1)).mean(dim=2)
else:
return x.view(x.size(0), x.size(1), -1).mean(-1).view(x.size(0),
x.size(1), 1, 1)
class ECANew(nn.Module):
def __init__(self, k_size=3):
super().__init__()
self.avg_pool = FastGlobalAvgPool2d(flatten=False)
self.conv = nn.Conv1d(1, 1, kernel_size=k_size, padding=(k_size - 1
) // 2, bias=False)
def forward(self, input_0):
primals_2 = self.conv.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
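# A minimal usage sketch (the 4x4x4x4 shape mirrors the test input asserted in call();
# the helper name and device choice are illustrative assumptions):
def _example_usage_eca():
    module = ECANew(k_size=3).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')  # [batch, channels, height, width]
    return module(x)  # same shape, each channel re-weighted by its attention score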
|
repo_name: cooked-sashimi/Yet-Another-YOLOv4-Pytorch
module_name: ECA
synthetic: false
uuid: 15,087
licenses: ["MIT"]
stars: 133
sha: c884ef8849987a75b0e17eba1b739c22d3782e90
repo_link: https://github.com/cooked-sashimi/Yet-Another-YOLOv4-Pytorch/tree/c884ef8849987a75b0e17eba1b739c22d3782e90
|
entry_point: Biaffine
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/am/cambydvhijs7vhod3hwt5aje7cqigviqetkgm6iccyrgbwhmprcb.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %full_default], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = (xindex // 5)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 5, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = 1.0
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp6, tmp9, tmp10)
tmp12 = tl.where(tmp4, tmp5, tmp11)
tl.store(out_ptr0 + (x2), tmp12, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, 5, 5), (25, 5, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf0, 320, grid=grid(320), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((16, 4, 5), (20, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 5), (20, 5, 1), 0), reinterpret_tensor(primals_3, (16, 5, 5), (0, 5, 1), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.cat]
triton_poi_fused_cat_0.run(primals_2, buf2, 320, grid=grid(320), stream=stream0)
del primals_2
buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [s], Original ATen: [aten.bmm]
extern_kernels.bmm(buf1, reinterpret_tensor(buf2, (16, 5, 4), (20, 1, 5), 0), out=buf3)
del buf1
return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 5), (20, 5, 1), 0), reinterpret_tensor(buf0, (16, 5, 4), (20, 1, 5), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 5, 5), (25, 5, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 5, tl.int64)
tmp9 = 1.0
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp6, tmp9, tmp10)
tmp12 = tl.where(tmp4, tmp5, tmp11)
tl.store(out_ptr0 + x2, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, 5, 5), (25, 5, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(320)](primals_1, buf0, 320, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 4, 5), (20, 5, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 5), (20, 5, 1),
0), reinterpret_tensor(primals_3, (16, 5, 5), (0, 5, 1), 0),
out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32)
triton_poi_fused_cat_0[grid(320)](primals_2, buf2, 320, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_2
buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf1, reinterpret_tensor(buf2, (16, 5, 4), (20,
1, 5), 0), out=buf3)
del buf1
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf2, (16, 4, 5), (20, 5, 1), 0
), reinterpret_tensor(buf0, (16, 5, 4), (20, 1, 5), 0)
class BiaffineNew(nn.Module):
def __init__(self, n_in, n_out=1, bias_x=True, bias_y=True):
super(BiaffineNew, self).__init__()
self.n_in = n_in
self.n_out = n_out
self.bias_x = bias_x
self.bias_y = bias_y
self.weight = nn.Parameter(torch.Tensor(n_out, n_in + bias_x, n_in +
bias_y))
self.reset_parameters()
def extra_repr(self):
info = f'n_in={self.n_in}, n_out={self.n_out}'
if self.bias_x:
info += f', bias_x={self.bias_x}'
if self.bias_y:
info += f', bias_y={self.bias_y}'
return info
def reset_parameters(self):
nn.init.zeros_(self.weight)
def forward(self, input_0, input_1):
primals_3 = self.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
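# --- Illustrative usage sketch (editorial addition, not part of the exported module) ---
# Minimal smoke test for the compiled biaffine scorer, assuming a CUDA device. n_in=4
# with both bias terms gives the (1, 5, 5) weight asserted in `call`; since
# reset_parameters zero-initialises the weight, the returned scores are all zeros here.
def example_biaffine_usage():
    module = BiaffineNew(n_in=4, n_out=1).cuda()
    x = torch.rand([4, 4, 4, 4], device='cuda')
    y = torch.rand([4, 4, 4, 4], device='cuda')
    # Returns a (4, 4, 4, 4) tensor of biaffine scores s = [x;1] W [y;1]^T.
    return module.forward(x, y)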
|
CNLPT/lightNLP
|
Biaffine
| false
| 13,451
|
[
"Apache-2.0"
] | 889
|
c7f128422ba5b16f514bb294145cb3b562e95829
|
https://github.com/CNLPT/lightNLP/tree/c7f128422ba5b16f514bb294145cb3b562e95829
|
Down
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/a5/ca5lhdbrikf3aciicsnwhrcvxvtq5eogksndk4rqc55mn5qu55ri.py
# Topologically Sorted Source Nodes: [conv2d, c], Original ATen: [aten.convolution, aten.elu]
# Source node to ATen node mapping:
# c => expm1, gt, mul, mul_2, where
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 1.0), kwargs = {})
# %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {})
triton_poi_fused_convolution_elu_0 = async_compile.triton('triton_poi_fused_convolution_elu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_elu_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 1.0
tmp6 = tmp2 * tmp5
tmp7 = libdevice.expm1(tmp6)
tmp8 = tmp7 * tmp5
tmp9 = tl.where(tmp4, tmp6, tmp8)
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, c], Original ATen: [aten.convolution, aten.elu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_elu_0.run(buf1, primals_2, buf2, 64, grid=grid(64), stream=stream0)
del primals_2
return (buf2, primals_1, primals_3, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_elu_0(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 1.0
tmp6 = tmp2 * tmp5
tmp7 = libdevice.expm1(tmp6)
tmp8 = tmp7 * tmp5
tmp9 = tl.where(tmp4, tmp6, tmp8)
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
2), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_elu_0[grid(64)](buf1, primals_2, buf2,
64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
return buf2, primals_1, primals_3, buf1
class DownNew(nn.Module):
def __init__(self, in_channels, out_channels, factor=2):
super(DownNew, self).__init__()
self.down = nn.Conv2d(in_channels, out_channels, kernel_size=3,
stride=factor, padding=1)
def forward(self, input_0):
primals_1 = self.down.weight
primals_2 = self.down.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
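# --- Illustrative usage sketch (editorial addition, not part of the exported module) ---
# Minimal smoke test for the compiled downsampling block, assuming a CUDA device. The
# channel counts match the (4, 4, 3, 3) weight asserted in `call`; the stride-2 conv
# followed by the fused bias add + ELU halves the spatial size.
def example_down_usage():
    module = DownNew(in_channels=4, out_channels=4).cuda()
    x = torch.rand([4, 4, 4, 4], device='cuda')
    # Returns a (4, 4, 2, 2) feature map.
    return module.forward(x)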
|
Smith42/unet-pytorch
|
Down
| false
| 9,551
|
[
"MIT"
] | 0
|
45a0459da69cee7f57fb369a8e2fc58668d81167
|
https://github.com/Smith42/unet-pytorch/tree/45a0459da69cee7f57fb369a8e2fc58668d81167
|
NoiseBlock
|
import torch
import torch.nn as nn
import torch.jit
class NoiseBlock(nn.Module):
def __init__(self, sigma):
super(NoiseBlock, self).__init__()
self.sigma = sigma
def forward(self, x):
out = x + self.sigma * torch.randn_like(x)
return out
def set_sigma(self, x):
self.sigma = x
return 1
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'sigma': 4}]
|
import torch
from torch import device
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.jit
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_add_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_out_ptr0 + x0, xmask)
tmp2 = 4.0
tmp3 = tmp1 * tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten.randn.default([4, 4, 4, 4], dtype=torch.
float32, device=device(type='cuda', index=0), pin_memory=False)
buf1 = buf0
del buf0
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(256)](buf2, arg0_1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf2,
class NoiseBlockNew(nn.Module):
def __init__(self, sigma):
super(NoiseBlockNew, self).__init__()
self.sigma = sigma
def set_sigma(self, x):
self.sigma = x
return 1
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
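# --- Illustrative usage sketch (editorial addition, not part of the exported module) ---
# Minimal smoke test for the compiled noise block, assuming a CUDA device. Note that the
# Triton kernel bakes the scale 4.0 in as a constant, so this compiled graph only matches
# sigma=4; set_sigma updates the attribute but does not change the fused kernel.
def example_noiseblock_usage():
    module = NoiseBlockNew(sigma=4)
    x = torch.rand([4, 4, 4, 4], device='cuda')
    # Returns x + 4 * randn_like(x).
    return module.forward(x)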
|
shuj1234/Hopfield-ODE
|
NoiseBlock
| false
| 10,816
|
[
"MIT"
] | 0
|
2b770c0141082174f394b189df725088308d8bdd
|
https://github.com/shuj1234/Hopfield-ODE/tree/2b770c0141082174f394b189df725088308d8bdd
|
CrossEntropy
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py
# Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# cross_entropy => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/x6/cx6zqrxmcyv5ocpxdnu4bm3ja2qtsm6l7476r5bdkkbd6pzhjuyn.py
# Topologically Sorted Source Nodes: [argmax, cross_entropy], Original ATen: [aten.argmax, aten.nll_loss2d_forward]
# Source node to ATen node mapping:
# argmax => argmax
# cross_entropy => convert_element_type, div, full_default_1, ne_1, ne_2, neg, sum_2, sum_3, where_1
# Graph fragment:
# %argmax : [num_users=4] = call_function[target=torch.ops.aten.argmax.default](args = (%arg0_1, -1), kwargs = {})
# %ne_1 : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%argmax, -100), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%squeeze,), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%ne_1, %neg, %full_default_1), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%where_1,), kwargs = {})
# %ne_2 : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%argmax, -100), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%ne_2,), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%sum_2, torch.float32), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, %convert_element_type), kwargs = {})
triton_per_fused_argmax_nll_loss2d_forward_1 = async_compile.triton('triton_per_fused_argmax_nll_loss2d_forward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_argmax_nll_loss2d_forward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_argmax_nll_loss2d_forward_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
r1 = rindex % 16
r2 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp56 = tl.load(in_ptr1 + (r1 + (64*r2)), None)
tmp58 = tl.load(in_ptr1 + (16 + r1 + (64*r2)), None)
tmp61 = tl.load(in_ptr1 + (32 + r1 + (64*r2)), None)
tmp64 = tl.load(in_ptr1 + (48 + r1 + (64*r2)), None)
tmp2 = tmp0 > tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1, 1], 0, tl.int64)
tmp11 = tl.full([1, 1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tmp15 = tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp18 = tmp15 > tmp17
tmp19 = tmp15 == tmp17
tmp20 = tmp15 != tmp15
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1, 1], 2, tl.int64)
tmp27 = tmp16 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp23 | tmp28
tmp30 = tl.where(tmp29, tmp15, tmp17)
tmp31 = tl.where(tmp29, tmp16, tmp26)
tmp33 = tmp30 > tmp32
tmp34 = tmp30 == tmp32
tmp35 = tmp30 != tmp30
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1, 1], 3, tl.int64)
tmp42 = tmp31 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tmp45 = tl.where(tmp44, tmp30, tmp32)
tmp46 = tl.where(tmp44, tmp31, tmp41)
tmp47 = tl.full([1, 1], -100, tl.int64)
tmp48 = tmp46 != tmp47
tmp49 = tl.where(tmp48, tmp46, tmp10)
tmp50 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp51 = tmp49 + tmp50
tmp52 = tmp49 < 0
tmp53 = tl.where(tmp52, tmp51, tmp49)
tl.device_assert((0 <= tmp53) & (tmp53 < 4), "index out of bounds: 0 <= tmp53 < 4")
tmp55 = tl.load(in_ptr1 + (r1 + (16*tmp53) + (64*r2)), None)
tmp57 = tl_math.exp(tmp56)
tmp59 = tl_math.exp(tmp58)
tmp60 = tmp57 + tmp59
tmp62 = tl_math.exp(tmp61)
tmp63 = tmp60 + tmp62
tmp65 = tl_math.exp(tmp64)
tmp66 = tmp63 + tmp65
tmp67 = tl_math.log(tmp66)
tmp68 = tmp55 - tmp67
tmp69 = -tmp68
tmp70 = 0.0
tmp71 = tl.where(tmp48, tmp69, tmp70)
tmp72 = tl.broadcast_to(tmp71, [XBLOCK, RBLOCK])
tmp74 = tl.sum(tmp72, 1)[:, None]
tmp75 = tmp48.to(tl.int64)
tmp76 = tl.broadcast_to(tmp75, [XBLOCK, RBLOCK])
tmp78 = tl.sum(tmp76, 1)[:, None]
tmp79 = tmp78.to(tl.float32)
tmp80 = tmp74 / tmp79
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp80, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(arg1_1, buf1, 256, grid=grid(256), stream=stream0)
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf4 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [argmax, cross_entropy], Original ATen: [aten.argmax, aten.nll_loss2d_forward]
triton_per_fused_argmax_nll_loss2d_forward_1.run(buf4, arg0_1, buf1, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
del buf1
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused_argmax_nll_loss2d_forward_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
r1 = rindex % 16
r2 = rindex // 16
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp56 = tl.load(in_ptr1 + (r1 + 64 * r2), None)
tmp58 = tl.load(in_ptr1 + (16 + r1 + 64 * r2), None)
tmp61 = tl.load(in_ptr1 + (32 + r1 + 64 * r2), None)
tmp64 = tl.load(in_ptr1 + (48 + r1 + 64 * r2), None)
tmp2 = tmp0 > tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1, 1], 0, tl.int64)
tmp11 = tl.full([1, 1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tmp15 = tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp18 = tmp15 > tmp17
tmp19 = tmp15 == tmp17
tmp20 = tmp15 != tmp15
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1, 1], 2, tl.int64)
tmp27 = tmp16 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp23 | tmp28
tmp30 = tl.where(tmp29, tmp15, tmp17)
tmp31 = tl.where(tmp29, tmp16, tmp26)
tmp33 = tmp30 > tmp32
tmp34 = tmp30 == tmp32
tmp35 = tmp30 != tmp30
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1, 1], 3, tl.int64)
tmp42 = tmp31 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tl.where(tmp44, tmp30, tmp32)
tmp46 = tl.where(tmp44, tmp31, tmp41)
tmp47 = tl.full([1, 1], -100, tl.int64)
tmp48 = tmp46 != tmp47
tmp49 = tl.where(tmp48, tmp46, tmp10)
tmp50 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp51 = tmp49 + tmp50
tmp52 = tmp49 < 0
tmp53 = tl.where(tmp52, tmp51, tmp49)
tl.device_assert((0 <= tmp53) & (tmp53 < 4),
'index out of bounds: 0 <= tmp53 < 4')
tmp55 = tl.load(in_ptr1 + (r1 + 16 * tmp53 + 64 * r2), None)
tmp57 = tl_math.exp(tmp56)
tmp59 = tl_math.exp(tmp58)
tmp60 = tmp57 + tmp59
tmp62 = tl_math.exp(tmp61)
tmp63 = tmp60 + tmp62
tmp65 = tl_math.exp(tmp64)
tmp66 = tmp63 + tmp65
tmp67 = tl_math.log(tmp66)
tmp68 = tmp55 - tmp67
tmp69 = -tmp68
tmp70 = 0.0
tmp71 = tl.where(tmp48, tmp69, tmp70)
tmp72 = tl.broadcast_to(tmp71, [XBLOCK, RBLOCK])
tmp74 = tl.sum(tmp72, 1)[:, None]
tmp75 = tmp48.to(tl.int64)
tmp76 = tl.broadcast_to(tmp75, [XBLOCK, RBLOCK])
tmp78 = tl.sum(tmp76, 1)[:, None]
tmp79 = tmp78.to(tl.float32)
tmp80 = tmp74 / tmp79
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp80, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf4 = buf2
del buf2
triton_per_fused_argmax_nll_loss2d_forward_1[grid(1)](buf4, arg0_1,
buf1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del buf1
return buf4,
class CrossEntropyNew(nn.Module):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
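# --- Illustrative usage sketch (editorial addition, not part of the exported module) ---
# Minimal smoke test for the compiled loss, assuming a CUDA device. Per the fused kernel,
# the first argument is reduced with an argmax over its last dimension to form the label
# map, while the second is treated as logits fed through log-softmax and the NLL reduction.
def example_cross_entropy_usage():
    module = CrossEntropyNew()
    target_scores = torch.rand([4, 4, 4, 4], device='cuda')
    logits = torch.rand([4, 4, 4, 4], device='cuda')
    # Returns a scalar mean cross-entropy value.
    return module.forward(target_scores, logits)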
|
tgxs002/1-stage-wseg
|
CrossEntropy
| false
| 4,474
|
[
"Apache-2.0"
] | 0
|
de16c51cc6cf8cd0ef248145980434d5f6104910
|
https://github.com/tgxs002/1-stage-wseg/tree/de16c51cc6cf8cd0ef248145980434d5f6104910
|
Conv2d
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/z3/cz3vliqlpgih6ihwoaxl6cmnicfmv2ygutcuphilcsragp3evc57.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0.run(buf1, primals_2, buf2, 16, grid=grid(16), stream=stream0)
del primals_2
return (buf1, primals_1, primals_3, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0[grid(16)](buf1,
primals_2, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf1, primals_1, primals_3, buf2
class Conv2dNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
relu=True, same_padding=False, bn=False):
super(Conv2dNew, self).__init__()
padding = int((kernel_size - 1) / 2) if same_padding else 0
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding=padding)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0,
affine=True) if bn else None
self.relu = nn.ReLU(inplace=True) if relu else None
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
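# --- Illustrative usage sketch (editorial addition, not part of the exported module) ---
# Minimal smoke test for the compiled conv block, assuming a CUDA device. kernel_size=4
# matches the (4, 4, 4, 4) weight asserted in `call`; with no padding a 4x4 input
# collapses to 1x1, and the bias add + ReLU are fused into the Triton kernel.
def example_conv2d_usage():
    module = Conv2dNew(in_channels=4, out_channels=4, kernel_size=4).cuda()
    x = torch.rand([4, 4, 4, 4], device='cuda')
    # Returns a (4, 4, 1, 1) activation.
    return module.forward(x)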
|
ChrisKonishi/multi-stream-crowd-counting-extended
|
Conv2d
| false
| 262
|
[
"MIT"
] | 0
|
4b1590499bd93ac09e62c4c7760b88ae92e6b301
|
https://github.com/ChrisKonishi/multi-stream-crowd-counting-extended/tree/4b1590499bd93ac09e62c4c7760b88ae92e6b301
|
down_shifted_conv2d
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/lk/clkvmfnc5ppj6ffcl2v3tvac6pbvz3y7mizzrdi65zokiejjhua3.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
# x => constant_pad_nd
# Graph fragment:
# %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [1, 1, 1, 0], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 480
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 6) % 5
x0 = xindex % 6
x2 = (xindex // 30)
x4 = xindex
tmp0 = (-1) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = (-1) + x0
tmp4 = tmp3 >= tmp1
tmp5 = tl.full([1], 4, tl.int64)
tmp6 = tmp3 < tmp5
tmp7 = tmp2 & tmp4
tmp8 = tmp7 & tmp6
tmp9 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp8 & xmask, other=0.0)
tl.store(out_ptr0 + (x4), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/2f/c2f6w3l6ahh7othacouiu2lpi7hja2epfoatgx6qgnjkgptctuad.py
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm => div, mul, pow_1, pow_2, sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_3, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1, 2, 3], True), kwargs = {})
# %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %pow_2), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %div), kwargs = {})
triton_per_fused__weight_norm_interface_1 = async_compile.triton('triton_per_fused__weight_norm_interface_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 32],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__weight_norm_interface_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__weight_norm_interface_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 24
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (24*x0)), rmask & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr0 + (r1 + (24*x0)), tmp9, rmask & xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/w5/cw5gytijzzkwnfpq2a2axdsj4pfxgxmwiuzizuyd4bw5uwnanzw7.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_1 => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %mul, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_3, (4, 4, 2, 3), (24, 6, 3, 1))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5, 6), (120, 30, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd]
stream0 = get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 480, grid=grid(480), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf2 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0); del buf1 # reuse
buf3 = empty_strided_cuda((4, 4, 2, 3), (24, 6, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_1.run(buf2, primals_3, primals_2, buf3, 4, 24, grid=grid(4), stream=stream0)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf0, buf3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf5, primals_4, 256, grid=grid(256), stream=stream0)
del primals_4
return (buf5, buf3, primals_2, primals_3, buf0, buf2, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 2, 3), (24, 6, 3, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.nn.utils import weight_norm as wn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 480
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 6 % 5
x0 = xindex % 6
x2 = xindex // 30
x4 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = -1 + x0
tmp4 = tmp3 >= tmp1
tmp5 = tl.full([1], 4, tl.int64)
tmp6 = tmp3 < tmp5
tmp7 = tmp2 & tmp4
tmp8 = tmp7 & tmp6
tmp9 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp8 & xmask,
other=0.0)
tl.store(out_ptr0 + x4, tmp9, xmask)
@triton.jit
def triton_per_fused__weight_norm_interface_1(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 24
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 24 * x0), rmask & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr0 + (r1 + 24 * x0), tmp9, rmask & xmask)
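# The reduction kernel above realizes weight normalization for the (4, 4, 2, 3)
# convolution weight: for each of the 4 output channels it computes the L2 norm
# of weight_v over the remaining 24 elements, then writes
# w = weight_g * weight_v / ||weight_v|| alongside the per-channel norms,
# matching what nn.utils.weight_norm produces for dim=0.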
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
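# The pointwise kernel above folds the convolution bias back in:
# extern_kernels.convolution below is invoked with bias=None, so each of the 4
# output channels receives its bias term here, in place on the conv result.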
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_3, (4, 4, 2, 3), (24, 6, 3, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5, 6), (120, 30, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(480)](primals_1, buf0, 480,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf2 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf1
buf3 = empty_strided_cuda((4, 4, 2, 3), (24, 6, 3, 1), torch.float32)
triton_per_fused__weight_norm_interface_1[grid(4)](buf2, primals_3,
primals_2, buf3, 4, 24, XBLOCK=1, num_warps=2, num_stages=1)
buf4 = extern_kernels.convolution(buf0, buf3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(256)](buf5, primals_4, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_4
return buf5, buf3, primals_2, primals_3, buf0, buf2, buf3
def down_shift(x, pad=None):
xs = [int(y) for y in x.size()]
x = x[:, :, :xs[2] - 1, :]
pad = nn.ZeroPad2d((0, 0, 1, 0)) if pad is None else pad
return pad(x)
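# Effect of the shift (shapes assumed to be (N, C, H, W)): the bottom row is
# dropped and a zero row is padded on top, so all content moves down by one row.
#   x = torch.arange(4.).view(1, 1, 2, 2)   # rows [0, 1] / [2, 3]
#   down_shift(x)                           # rows [0, 0] / [0, 1]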
class down_shifted_conv2dNew(nn.Module):
def __init__(self, num_filters_in, num_filters_out, filter_size=(2, 3),
stride=(1, 1), shift_output_down=False, norm='weight_norm'):
super(down_shifted_conv2dNew, self).__init__()
assert norm in [None, 'batch_norm', 'weight_norm']
self.conv = nn.Conv2d(num_filters_in, num_filters_out, filter_size,
stride)
self.shift_output_down = shift_output_down
self.norm = norm
self.pad = nn.ZeroPad2d((int((filter_size[1] - 1) / 2), int((
filter_size[1] - 1) / 2), filter_size[0] - 1, 0))
if norm == 'weight_norm':
self.conv = wn(self.conv)
elif norm == 'batch_norm':
self.bn = nn.BatchNorm2d(num_filters_out)
if shift_output_down:
self.down_shift = lambda x: down_shift(x, pad=nn.ZeroPad2d((0,
0, 1, 0)))
def forward(self, input_0):
primals_4 = self.conv.bias
primals_2 = self.conv.weight_g
primals_3 = self.conv.weight_v
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
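# Minimal usage sketch (assumes a CUDA device; the compiled call() path above is
# specialized to the default filter_size=(2, 3) and a (4, 4, 4, 4) input):
#   m = down_shifted_conv2dNew(4, 4).cuda()
#   y = m(torch.rand(4, 4, 4, 4, device='cuda'))   # -> shape (4, 4, 4, 4)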
|
andiac/pixel-cnn-pp
|
down_shifted_conv2d
| false
| 6,207
|
[
"MIT"
] | 1
|
3ba856320e40208cbb6e9cac3e66a739f148903e
|
https://github.com/andiac/pixel-cnn-pp/tree/3ba856320e40208cbb6e9cac3e66a739f148903e
|
Conv2dBlock
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/cs/ccsak2aq2focic3gvi5yypd2u37w22ixutbqzqc6vdjhrk4zppac.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 9) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 3, 3), (36, 9, 3, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.bool)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0.run(buf1, primals_2, buf2, 144, grid=grid(144), stream=stream0)
del primals_2
return (buf1, primals_1, primals_3, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 9 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr0 + x3, tmp6, xmask)
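# This fused kernel adds the conv bias, applies ReLU in place, and also stores
# the boolean mask (activation <= 0) that threshold_backward consumes in the
# backward pass; that is why call() below allocates a bool buffer alongside the
# activation.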
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 3, 3), (36, 9, 3, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0[grid(144)](buf1,
primals_2, buf2, 144, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3, buf2
class AdaptiveInstanceNorm2d(nn.Module):
def __init__(self, num_features, eps=1e-05, momentum=0.1):
super(AdaptiveInstanceNorm2d, self).__init__()
self.num_features = num_features
self.eps = eps
self.momentum = momentum
self.weight = None
self.bias = None
self.register_buffer('running_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.ones(num_features))
def forward(self, x):
assert self.weight is not None and self.bias is not None, 'Please assign weight and bias before calling AdaIN!'
b, c = x.size(0), x.size(1)
running_mean = self.running_mean.repeat(b).type_as(x)
running_var = self.running_var.repeat(b).type_as(x)
x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:])
out = F.batch_norm(x_reshaped, running_mean, running_var, self.
weight, self.bias, True, self.momentum, self.eps)
return out.view(b, c, *x.size()[2:])
def __repr__(self):
return self.__class__.__name__ + '(' + str(self.num_features) + ')'
class LayerNorm(nn.Module):
def __init__(self, num_features, eps=1e-05, affine=True, fp16=False):
super(LayerNorm, self).__init__()
self.num_features = num_features
self.affine = affine
self.eps = eps
self.fp16 = fp16
if self.affine:
self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_())
self.beta = nn.Parameter(torch.zeros(num_features))
def forward(self, x):
shape = [-1] + [1] * (x.dim() - 1)
if x.type() == 'torch.cuda.HalfTensor':
mean = x.view(-1).float().mean().view(*shape)
std = x.view(-1).float().std().view(*shape)
mean = mean.half()
std = std.half()
else:
mean = x.view(x.size(0), -1).mean(1).view(*shape)
std = x.view(x.size(0), -1).std(1).view(*shape)
x = (x - mean) / (std + self.eps)
if self.affine:
shape = [1, -1] + [1] * (x.dim() - 2)
x = x * self.gamma.view(*shape) + self.beta.view(*shape)
return x
class Conv2dBlockNew(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size, stride, padding=
0, norm='none', activation='relu', pad_type='zero', dilation=1,
fp16=False):
super(Conv2dBlockNew, self).__init__()
self.use_bias = True
norm_dim = output_dim
if norm == 'bn':
self.norm = nn.BatchNorm2d(norm_dim)
elif norm == 'in':
self.norm = nn.InstanceNorm2d(norm_dim)
elif norm == 'ln':
self.norm = LayerNorm(norm_dim, fp16=fp16)
elif norm == 'adain':
self.norm = AdaptiveInstanceNorm2d(norm_dim)
elif norm == 'none' or norm == 'sn':
self.norm = None
else:
assert 0, 'Unsupported normalization: {}'.format(norm)
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2, inplace=True)
elif activation == 'prelu':
self.activation = nn.PReLU()
elif activation == 'selu':
self.activation = nn.SELU(inplace=True)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'none':
self.activation = None
else:
assert 0, 'Unsupported activation: {}'.format(activation)
if norm == 'sn':
self.conv = SpectralNorm(nn.Conv2d(input_dim, output_dim,
kernel_size, stride, dilation=dilation, bias=self.use_bias))
else:
self.conv = nn.Conv2d(input_dim, output_dim, kernel_size,
stride, padding=1, dilation=dilation, bias=self.use_bias)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
ricklentz/Seg-Uncertainty
|
Conv2dBlock
| false
| 16,322
|
[
"MIT"
] | 298
|
82fd7056cccb265b3fc3e8a90338866661cab230
|
https://github.com/ricklentz/Seg-Uncertainty/tree/82fd7056cccb265b3fc3e8a90338866661cab230
|
LR_PAD
|
import torch
import torch.nn as nn
def lr_pad(x, padding=1):
""" Pad left/right-most to each other instead of zero padding """
return torch.cat([x[..., -padding:], x, x[..., :padding]], dim=3)
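# Example for padding=1 on the width dimension: a row [a, b, c, d] becomes
# [d, a, b, c, d, a], i.e. circular padding so the left and right edges of a
# panorama wrap into each other instead of being zero-filled.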
class LR_PAD(nn.Module):
""" Pad left/right-most to each other instead of zero padding """
def __init__(self, padding=1):
super(LR_PAD, self).__init__()
self.padding = padding
def forward(self, x):
return lr_pad(x, self.padding)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (3 + 4 * x1), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 5, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + (4 * x1 + (-1 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tl.full([1], 6, tl.int64)
tmp14 = tl.load(in_ptr0 + 4 * x1, tmp11 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tl.where(tmp9, tmp10, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
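# The kernel above fuses lr_pad's three-way torch.cat: output column 0 reads the
# last input column, columns 1..4 copy the input, and column 5 reads the first
# input column, producing the circularly padded (4, 4, 4, 6) tensor in one pass.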
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 6), (96, 24, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(384)](arg0_1, buf0, 384, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
def lr_pad(x, padding=1):
""" Pad left/right-most to each other instead of zero padding """
return torch.cat([x[..., -padding:], x, x[..., :padding]], dim=3)
class LR_PADNew(nn.Module):
""" Pad left/right-most to each other instead of zero padding """
def __init__(self, padding=1):
super(LR_PADNew, self).__init__()
self.padding = padding
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
zokin/HorizonNet
|
LR_PAD
| false
| 4,673
|
[
"MIT"
] | 0
|
a93a76ec7fdc76a5ba023adaed869e34f7f3cea4
|
https://github.com/zokin/HorizonNet/tree/a93a76ec7fdc76a5ba023adaed869e34f7f3cea4
|
UnaryPrimitivesPredefined_v2
|
import math
import torch
from torch import nn
def apply_last_dim(model, x):
size = list(x.size())
y = model(x.contiguous().view(-1, size[-1]))
size[-1] = y.size(-1)
y = y.view(torch.Size(size))
return y
def get_int_dim_index(name):
if isinstance(name, int):
return name
name_list = 'axyz'
assert name in name_list
return [i for i in range(len(name_list)) if name_list[i] == name][0] - 1
class Length(nn.Module):
def __init__(self, dim_index=-1):
super().__init__()
self.dim_index = dim_index
def forward(self, states, dim_index=None):
if dim_index is None:
dim_index = self.dim_index
if isinstance(dim_index, int):
dim_index = [dim_index]
else:
dim_index = [get_int_dim_index(x) for x in dim_index]
if -1 in dim_index:
def extractor(x):
return torch.sqrt(torch.sum(x * x, dim=1, keepdim=True))
else:
def extractor(x):
return torch.sqrt(torch.sum(x[:, dim_index].pow(2), dim=1,
keepdim=True))
return apply_last_dim(extractor, states)
def show(self, name='Length', indent=0, log=print, **kwargs):
log(' ' * indent + "- %s(x) = |x's dim %s|" % (name, str(self.
dim_index)))
class Distance(nn.Module):
def __init__(self, dim_index=-1):
super().__init__()
self.dim_index = dim_index
self.length = Length(dim_index)
def forward(self, states1, states2, dim_index=None):
return self.length(states1 - states2, dim_index)
def show(self, name='Distance', indent=0, log=print, **kwargs):
log(' ' * indent + '- %s(x1, x2) = |x1 - x2|' % name)
class Normalize(nn.Module):
def __init__(self, distribution=None, **kwargs):
super().__init__()
self.distribution = distribution
self.data_ = []
if distribution is None:
pass
elif distribution == 'normal':
mean = kwargs['mean'] if 'mean' in kwargs else 0
std = kwargs['std'] if 'std' in kwargs else 1
self.param = nn.Parameter(torch.Tensor([mean, std]), False)
elif distribution == 'uniform':
vmin = kwargs['minv'] if 'minv' in kwargs else 0
vmax = kwargs['maxv'] if 'maxv' in kwargs else 1
self.param = nn.Parameter(torch.Tensor([vmin, vmax]), False)
else:
raise NotImplementedError()
def forward(self, x, keep_data=False):
if keep_data:
self.data_.append(x.detach().cpu().view(-1))
return x
if self.distribution is None:
return x
elif self.distribution == 'normal':
mean = self.param[0]
std = self.param[1]
return (x - mean) / std
elif self.distribution == 'uniform':
vmin = self.param[0]
vmax = self.param[1]
return (x - vmin) / (vmax - vmin + 1e-05)
else:
raise NotImplementedError()
def reset_parameters(self, name=None):
assert len(self.data_) > 0
data = torch.cat(self.data_)
self.data_ = []
if self.distribution is None:
pass
elif self.distribution == 'normal':
with torch.no_grad():
self.param[0] = data.mean().item()
self.param[1] = data.std().item()
if name is not None:
None
elif self.distribution == 'uniform':
with torch.no_grad():
self.param[0] = data.min().item()
self.param[1] = data.max().item()
if name is not None:
None
else:
raise NotImplementedError()
def recover_threshold(self, x):
if self.distribution is None:
return x
elif self.distribution == 'normal':
return x * float(self.param[1]) + float(self.param[0])
elif self.distribution == 'uniform':
return x * float(self.param[1] - self.param[0] + 1e-05) + float(
self.param[0])
else:
raise NotImplementedError()
def init_thresholds(self, x):
if self.distribution is None:
nn.init.normal_(x, 0, 1)
elif self.distribution == 'normal':
nn.init.normal_(x, 0, 1)
elif self.distribution == 'uniform':
nn.init.uniform_(x, 0, 1)
else:
raise NotImplementedError()
class SoftCmp(nn.Module):
"""
Sigmoid((x - y) / e^beta)
"""
def __init__(self):
super().__init__()
self.sigmoid = nn.Sigmoid()
def forward(self, x, y, beta):
return self.sigmoid((x - y) / math.exp(beta))
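# With beta = 0 this is simply sigmoid(x - y); a larger beta divides the
# difference by e^beta and softens the comparison towards 0.5.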
class Inequality(nn.Module):
def __init__(self, out_dim=1, distribution=None, **kwargs):
super().__init__()
self.out_dim = out_dim
self.thresholds = nn.Parameter(torch.zeros(out_dim), requires_grad=True
)
self.distribution = distribution
self.normalize = Normalize(distribution)
self.cmp = SoftCmp()
self.normalize.init_thresholds(self.thresholds)
def forward(self, states, beta=0, **kwargs):
"""
:param states: [batch, length, n_agents, ... ]
"""
states_expand = states.view(*(states.size() + (1,)))
estimate_parameters = 'estimate_parameters' in kwargs and kwargs[
'estimate_parameters']
states_expand = self.normalize(states_expand, keep_data=
estimate_parameters)
return self.cmp(states_expand, self.thresholds.view(*([1] * len(
states.size()) + [self.out_dim])), beta)
def reset_parameters(self, parameter_name, name=None):
if parameter_name == 'primitive_inequality':
self.normalize.reset_parameters(name=name)
self.normalize.init_thresholds(self.thresholds)
def get_descriptions(self, name='Inequality'):
theta = self.thresholds.detach().cpu().view(self.out_dim)
        descriptions = []
for k in range(theta.size(0)):
t = self.normalize.recover_threshold(theta[k])
if 'speed' in name:
t = t * 8
if 'acc' in name:
t = t * 64
            descriptions.append('%s > %.2lf' % (name, t))
        return descriptions
class N_aryPrimitivesPredefined(nn.Module):
def __init__(self):
super().__init__()
self.out_dim = 0
self.primitive_list = []
self.ineqs = nn.ModuleDict({})
def reset_parameters(self, parameter_name):
for k in self.primitive_list:
self.ineqs[k].reset_parameters(parameter_name, name=k)
def get_descriptions(self):
descriptions = []
for k in self.primitive_list:
descriptions += self.ineqs[k].get_descriptions(name=k)
return descriptions
class AlignDifferential(nn.Module):
def __init__(self):
super().__init__()
def new_length(self, length):
return length
def forward(self, states):
"""
:param states: [batch, length, *]
"""
padded_states = torch.cat([states[:, 0:1] * 2 - states[:, 1:2],
states, states[:, -1:] * 2 - states[:, -2:-1]], dim=1)
return (padded_states[:, 2:] - padded_states[:, :-2]) / 2
def show(self, name='AlignDifferential', indent=0, log=print, **kwargs):
log(' ' * indent + '- %s(x) = AlignDifferential()' % (name,))
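# Along the length (time) dimension this is a central difference whose endpoints
# are linearly extrapolated, so for states s_0 .. s_{T-1}:
#   out[0] = s_1 - s_0,   out[t] = (s_{t+1} - s_{t-1}) / 2,   out[T-1] = s_{T-1} - s_{T-2}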
class UnaryPrimitivesPredefined_v2(N_aryPrimitivesPredefined):
def __init__(self, cmp_dim=10):
super().__init__()
self.differential = AlignDifferential()
self.primitive_list = ['acc', 'pos_z', 'speed', 'dist_to_ball']
self.distance = Distance()
self.ineqs.update({'acc': Inequality(out_dim=cmp_dim, distribution=
'normal'), 'pos_z': Inequality(out_dim=cmp_dim, distribution=
'uniform'), 'speed': Inequality(out_dim=cmp_dim, distribution=
'normal'), 'dist_to_ball': Inequality(out_dim=cmp_dim,
distribution='normal')})
self.out_dim = sum([self.ineqs[k].out_dim for k in self.primitive_list]
)
def forward(self, states, beta=0, **kwargs):
"""
:param states: [batch, length, n_agents, state_dim]
return [batch, length, n_agents, out_dim]
"""
velocity = self.differential(states)
acc = self.differential(velocity)
n_agents = states.size(2)
p1 = states.unsqueeze(2).repeat(1, 1, n_agents, 1, 1)
p2 = states.unsqueeze(3).repeat(1, 1, 1, n_agents, 1)
dist = self.distance(p1, p2).squeeze(4)
ineqs_inputs = {'pos_z': states[:, :, 1:, 2], 'speed': torch.norm(
velocity[:, :, 1:, :], p=2, dim=3), 'acc': torch.norm(acc[:, :,
1:, :], p=2, dim=3), 'dist_to_ball': dist[:, :, 0, 1:]}
output = torch.cat([self.ineqs[k](ineqs_inputs[k], beta, **kwargs) for
k in self.primitive_list], dim=-1)
return output
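# With the default cmp_dim=10 the output stacks 10 soft threshold comparisons
# per primitive ('acc', 'pos_z', 'speed', 'dist_to_ball'), i.e. a last dimension
# of 40; the [:, :, 1:] slices suggest agent index 0 plays the role of the ball,
# so the agent axis shrinks by one.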
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 6
x0 = xindex % 16
x2 = xindex // 96
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = 2.0
tmp7 = tmp5 * tmp6
tmp8 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp9 = tmp7 - tmp8
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 5, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tmp12 & tmp14
tmp16 = tl.load(in_ptr0 + (x0 + 16 * (-1 + x1) + 64 * x2), tmp15 &
xmask, other=0.0)
tmp17 = tmp0 >= tmp13
tl.full([1], 6, tl.int64)
tmp20 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp17 & xmask,
eviction_policy='evict_last', other=0.0)
tmp21 = tmp20 * tmp6
tmp22 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp17 & xmask,
eviction_policy='evict_last', other=0.0)
tmp23 = tmp21 - tmp22
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp17, tmp23, tmp24)
tmp26 = tl.where(tmp15, tmp16, tmp25)
tmp27 = tl.where(tmp4, tmp11, tmp26)
tl.store(out_ptr0 + x3, tmp27, xmask)
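# The kernel above materializes AlignDifferential's padded_states along the time
# axis: index 0 holds 2*s_0 - s_1, indices 1..4 copy the input states, and index
# 5 holds 2*s_3 - s_2, yielding the (4, 6, 4, 4) buffer differentiated next.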
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 6
x0 = xindex % 16
x2 = xindex // 96
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (32 + x0 + 16 * x1 + 96 * x2), tmp4 & xmask,
other=0.0)
tmp6 = tl.load(in_ptr0 + (x0 + 16 * x1 + 96 * x2), tmp4 & xmask, other=0.0)
tmp7 = tmp5 - tmp6
tmp8 = 0.5
tmp9 = tmp7 * tmp8
tmp10 = 2.0
tmp11 = tmp9 * tmp10
tmp12 = tl.load(in_ptr0 + (48 + x0 + 16 * x1 + 96 * x2), tmp4 & xmask,
other=0.0)
tmp13 = tl.load(in_ptr0 + (16 + x0 + 16 * x1 + 96 * x2), tmp4 & xmask,
other=0.0)
tmp14 = tmp12 - tmp13
tmp15 = tmp14 * tmp8
tmp16 = tmp11 - tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp4, tmp16, tmp17)
tmp19 = tmp0 >= tmp3
tmp20 = tl.full([1], 5, tl.int64)
tmp21 = tmp0 < tmp20
tmp22 = tmp19 & tmp21
tmp23 = tl.load(in_ptr0 + (32 + x0 + 16 * (-1 + x1) + 96 * x2), tmp22 &
xmask, other=0.0)
tmp24 = tl.load(in_ptr0 + (x0 + 16 * (-1 + x1) + 96 * x2), tmp22 &
xmask, other=0.0)
tmp25 = tmp23 - tmp24
tmp26 = tmp25 * tmp8
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp22, tmp26, tmp27)
tmp29 = tmp0 >= tmp20
tl.full([1], 6, tl.int64)
tmp32 = tl.load(in_ptr0 + (80 + x0 + 16 * (-5 + x1) + 96 * x2), tmp29 &
xmask, other=0.0)
tmp33 = tl.load(in_ptr0 + (48 + x0 + 16 * (-5 + x1) + 96 * x2), tmp29 &
xmask, other=0.0)
tmp34 = tmp32 - tmp33
tmp35 = tmp34 * tmp8
tmp36 = tmp35 * tmp10
tmp37 = tl.load(in_ptr0 + (64 + x0 + 16 * (-5 + x1) + 96 * x2), tmp29 &
xmask, other=0.0)
tmp38 = tl.load(in_ptr0 + (32 + x0 + 16 * (-5 + x1) + 96 * x2), tmp29 &
xmask, other=0.0)
tmp39 = tmp37 - tmp38
tmp40 = tmp39 * tmp8
tmp41 = tmp36 - tmp40
tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
tmp43 = tl.where(tmp29, tmp41, tmp42)
tmp44 = tl.where(tmp22, tmp28, tmp43)
tmp45 = tl.where(tmp4, tmp18, tmp44)
tl.store(out_ptr0 + x3, tmp45, xmask)
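# This second cat kernel fuses the central difference over the padded states
# (i.e. the velocity estimate) with the same boundary padding, so its output is
# the padded velocity sequence from which acceleration is then computed.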
@triton.jit
def triton_poi_fused_div_sub_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = xindex // 3 % 4
x2 = xindex // 12
x3 = xindex
tmp0 = tl.load(in_ptr0 + (36 + 4 * x0 + 16 * x1 + 96 * x2), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (4 + 4 * x0 + 16 * x1 + 96 * x2), xmask,
eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (37 + 4 * x0 + 16 * x1 + 96 * x2), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (5 + 4 * x0 + 16 * x1 + 96 * x2), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (38 + 4 * x0 + 16 * x1 + 96 * x2), xmask,
eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (6 + 4 * x0 + 16 * x1 + 96 * x2), xmask,
eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (39 + 4 * x0 + 16 * x1 + 96 * x2), xmask,
eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (7 + 4 * x0 + 16 * x1 + 96 * x2), xmask,
eviction_policy='evict_last')
tmp25 = tl.load(in_ptr1 + 0)
tmp26 = tl.broadcast_to(tmp25, [XBLOCK])
tmp28 = tl.load(in_ptr1 + 1)
tmp29 = tl.broadcast_to(tmp28, [XBLOCK])
tmp2 = tmp0 - tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp4
tmp8 = tmp6 - tmp7
tmp9 = tmp8 * tmp3
tmp10 = tmp9 * tmp9
tmp11 = tmp5 + tmp10
tmp14 = tmp12 - tmp13
tmp15 = tmp14 * tmp3
tmp16 = tmp15 * tmp15
tmp17 = tmp11 + tmp16
tmp20 = tmp18 - tmp19
tmp21 = tmp20 * tmp3
tmp22 = tmp21 * tmp21
tmp23 = tmp17 + tmp22
tmp24 = libdevice.sqrt(tmp23)
tmp27 = tmp24 - tmp26
tmp30 = tmp27 / tmp29
tl.store(in_out_ptr0 + x3, tmp30, xmask)
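# The kernel above fuses torch.norm(..., p=2, dim=3) with the 'normal'
# Normalize: it takes the central difference of its input over time, computes
# the L2 norm across the 4 state channels, and applies (x - mean) / std using
# the two-element normalize.param buffer; call() reuses it for both 'acc'
# (on the padded velocities) and 'speed' (on the padded states).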
@triton.jit
def triton_poi_fused_add_div_sub_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = xindex // 3
x2 = xindex
tmp0 = tl.load(in_ptr0 + (6 + 4 * x0 + 16 * x1), xmask, eviction_policy
='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr1 + 1)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp3 = tmp0 - tmp2
tmp6 = tmp5 - tmp2
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = tmp3 / tmp8
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_div_sub_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = xindex // 3 % 4
x2 = xindex // 12
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4 + 4 * x0 + 16 * x1 + 16 * ((1 + x0) // 16) +
64 * x2 + 64 * ((1 + x0 + 16 * x1) // 64)), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (4 * ((1 + x0) // 4) + 16 * x1 + 16 * ((1 + x0
) // 16) + 64 * x2 + 64 * ((1 + x0 + 16 * x1) // 64)), xmask,
eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (5 + 4 * x0 + 16 * x1 + 16 * ((1 + x0) // 16) +
64 * x2 + 64 * ((1 + x0 + 16 * x1) // 64)), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * ((1 + x0) // 4) + 16 * x1 + 16 * ((1 +
x0) // 16) + 64 * x2 + 64 * ((1 + x0 + 16 * x1) // 64)), xmask,
eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (6 + 4 * x0 + 16 * x1 + 16 * ((1 + x0) // 16) +
64 * x2 + 64 * ((1 + x0 + 16 * x1) // 64)), xmask, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr0 + (2 + 4 * ((1 + x0) // 4) + 16 * x1 + 16 * ((1 +
x0) // 16) + 64 * x2 + 64 * ((1 + x0 + 16 * x1) // 64)), xmask,
eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (7 + 4 * x0 + 16 * x1 + 16 * ((1 + x0) // 16) +
64 * x2 + 64 * ((1 + x0 + 16 * x1) // 64)), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (3 + 4 * ((1 + x0) // 4) + 16 * x1 + 16 * ((1 +
x0) // 16) + 64 * x2 + 64 * ((1 + x0 + 16 * x1) // 64)), xmask,
eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + 0)
tmp21 = tl.broadcast_to(tmp20, [XBLOCK])
tmp23 = tl.load(in_ptr1 + 1)
tmp24 = tl.broadcast_to(tmp23, [XBLOCK])
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = libdevice.sqrt(tmp18)
tmp22 = tmp19 - tmp21
tmp25 = tmp22 / tmp24
tl.store(in_out_ptr0 + x3, tmp25, xmask)
@triton.jit
def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1920
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 40
x1 = xindex // 40
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 10, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 - tmp6
tmp8 = 1.0
tmp9 = tmp7 * tmp8
tmp10 = tl.sigmoid(tmp9)
tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype)
tmp12 = tl.where(tmp4, tmp10, tmp11)
tmp13 = tmp0 >= tmp3
tmp14 = tl.full([1], 20, tl.int64)
tmp15 = tmp0 < tmp14
tmp16 = tmp13 & tmp15
tmp17 = tl.load(in_ptr2 + x1, tmp16 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp18 = tl.load(in_ptr3 + (-10 + x0), tmp16 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp19 = tmp17 - tmp18
tmp20 = tmp19 * tmp8
tmp21 = tl.sigmoid(tmp20)
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp16, tmp21, tmp22)
tmp24 = tmp0 >= tmp14
tmp25 = tl.full([1], 30, tl.int64)
tmp26 = tmp0 < tmp25
tmp27 = tmp24 & tmp26
tmp28 = tl.load(in_ptr4 + x1, tmp27 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp29 = tl.load(in_ptr5 + (-20 + x0), tmp27 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp30 = tmp28 - tmp29
tmp31 = tmp30 * tmp8
tmp32 = tl.sigmoid(tmp31)
tmp33 = tl.full(tmp32.shape, 0.0, tmp32.dtype)
tmp34 = tl.where(tmp27, tmp32, tmp33)
tmp35 = tmp0 >= tmp25
tl.full([1], 40, tl.int64)
tmp38 = tl.load(in_ptr6 + x1, tmp35 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp39 = tl.load(in_ptr7 + (-30 + x0), tmp35 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp40 = tmp38 - tmp39
tmp41 = tmp40 * tmp8
tmp42 = tl.sigmoid(tmp41)
tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype)
tmp44 = tl.where(tmp35, tmp42, tmp43)
tmp45 = tl.where(tmp27, tmp34, tmp44)
tmp46 = tl.where(tmp16, tmp23, tmp45)
tmp47 = tl.where(tmp4, tmp12, tmp46)
tl.store(out_ptr0 + x2, tmp47, xmask)
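# The final kernel fuses the four SoftCmp evaluations with the output torch.cat:
# for every primitive it computes sigmoid((normalized value - threshold_k) * 1.0),
# the factor 1.0 being 1 / e^beta for the default beta = 0, and writes the
# 40-wide concatenated result.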
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2,), (1,))
assert_size_stride(primals_3, (10,), (1,))
assert_size_stride(primals_4, (2,), (1,))
assert_size_stride(primals_5, (10,), (1,))
assert_size_stride(primals_6, (2,), (1,))
assert_size_stride(primals_7, (10,), (1,))
assert_size_stride(primals_8, (2,), (1,))
assert_size_stride(primals_9, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(384)](primals_1, buf0, 384, XBLOCK=128,
num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32)
triton_poi_fused_cat_1[grid(384)](buf0, buf1, 384, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 3, 1), (12, 3, 1, 48), torch.float32)
buf3 = reinterpret_tensor(buf2, (4, 4, 3, 1), (12, 3, 1, 1), 0)
del buf2
triton_poi_fused_div_sub_2[grid(48)](buf3, buf1, primals_2, 48,
XBLOCK=64, num_warps=1, num_stages=1)
del buf1
del primals_2
buf4 = empty_strided_cuda((4, 4, 3, 1), (12, 3, 1, 1), torch.float32)
triton_poi_fused_add_div_sub_3[grid(48)](primals_1, primals_4, buf4,
48, XBLOCK=64, num_warps=1, num_stages=1)
del primals_4
buf5 = empty_strided_cuda((4, 4, 3, 1), (12, 3, 1, 48), torch.float32)
buf6 = reinterpret_tensor(buf5, (4, 4, 3, 1), (12, 3, 1, 1), 0)
del buf5
triton_poi_fused_div_sub_2[grid(48)](buf6, buf0, primals_6, 48,
XBLOCK=64, num_warps=1, num_stages=1)
del buf0
del primals_6
buf7 = empty_strided_cuda((4, 4, 3, 1), (12, 3, 1, 48), torch.float32)
buf8 = reinterpret_tensor(buf7, (4, 4, 3, 1), (12, 3, 1, 1), 0)
del buf7
triton_poi_fused_div_sub_4[grid(48)](buf8, primals_1, primals_8, 48,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_1
del primals_8
buf9 = empty_strided_cuda((4, 4, 3, 40), (480, 120, 40, 1), torch.
float32)
triton_poi_fused_cat_5[grid(1920)](buf3, primals_3, buf4, primals_5,
buf6, primals_7, buf8, primals_9, buf9, 1920, XBLOCK=128,
num_warps=4, num_stages=1)
return (buf9, primals_3, primals_5, primals_7, primals_9, buf3, buf4,
buf6, buf8)
def apply_last_dim(model, x):
size = list(x.size())
y = model(x.contiguous().view(-1, size[-1]))
size[-1] = y.size(-1)
y = y.view(torch.Size(size))
return y
def get_int_dim_index(name):
if isinstance(name, int):
return name
name_list = 'axyz'
assert name in name_list
return [i for i in range(len(name_list)) if name_list[i] == name][0] - 1
class Length(nn.Module):
def __init__(self, dim_index=-1):
super().__init__()
self.dim_index = dim_index
def forward(self, states, dim_index=None):
if dim_index is None:
dim_index = self.dim_index
if isinstance(dim_index, int):
dim_index = [dim_index]
else:
dim_index = [get_int_dim_index(x) for x in dim_index]
if -1 in dim_index:
def extractor(x):
return torch.sqrt(torch.sum(x * x, dim=1, keepdim=True))
else:
def extractor(x):
return torch.sqrt(torch.sum(x[:, dim_index].pow(2), dim=1,
keepdim=True))
return apply_last_dim(extractor, states)
def show(self, name='Length', indent=0, log=print, **kwargs):
log(' ' * indent + "- %s(x) = |x's dim %s|" % (name, str(self.
dim_index)))
class Distance(nn.Module):
def __init__(self, dim_index=-1):
super().__init__()
self.dim_index = dim_index
self.length = Length(dim_index)
def forward(self, states1, states2, dim_index=None):
return self.length(states1 - states2, dim_index)
def show(self, name='Distance', indent=0, log=print, **kwargs):
log(' ' * indent + '- %s(x1, x2) = |x1 - x2|' % name)
class Normalize(nn.Module):
def __init__(self, distribution=None, **kwargs):
super().__init__()
self.distribution = distribution
self.data_ = []
if distribution is None:
pass
elif distribution == 'normal':
mean = kwargs['mean'] if 'mean' in kwargs else 0
std = kwargs['std'] if 'std' in kwargs else 1
self.param = nn.Parameter(torch.Tensor([mean, std]), False)
elif distribution == 'uniform':
vmin = kwargs['minv'] if 'minv' in kwargs else 0
vmax = kwargs['maxv'] if 'maxv' in kwargs else 1
self.param = nn.Parameter(torch.Tensor([vmin, vmax]), False)
else:
raise NotImplementedError()
def forward(self, x, keep_data=False):
if keep_data:
self.data_.append(x.detach().cpu().view(-1))
return x
if self.distribution is None:
return x
elif self.distribution == 'normal':
mean = self.param[0]
std = self.param[1]
return (x - mean) / std
elif self.distribution == 'uniform':
vmin = self.param[0]
vmax = self.param[1]
return (x - vmin) / (vmax - vmin + 1e-05)
else:
raise NotImplementedError()
def reset_parameters(self, name=None):
assert len(self.data_) > 0
data = torch.cat(self.data_)
self.data_ = []
if self.distribution is None:
pass
elif self.distribution == 'normal':
with torch.no_grad():
self.param[0] = data.mean().item()
self.param[1] = data.std().item()
if name is not None:
None
elif self.distribution == 'uniform':
with torch.no_grad():
self.param[0] = data.min().item()
self.param[1] = data.max().item()
if name is not None:
None
else:
raise NotImplementedError()
def recover_threshold(self, x):
if self.distribution is None:
return x
elif self.distribution == 'normal':
return x * float(self.param[1]) + float(self.param[0])
elif self.distribution == 'uniform':
return x * float(self.param[1] - self.param[0] + 1e-05) + float(
self.param[0])
else:
raise NotImplementedError()
def init_thresholds(self, x):
if self.distribution is None:
nn.init.normal_(x, 0, 1)
elif self.distribution == 'normal':
nn.init.normal_(x, 0, 1)
elif self.distribution == 'uniform':
nn.init.uniform_(x, 0, 1)
else:
raise NotImplementedError()
class SoftCmp(nn.Module):
"""
Sigmoid((x - y) / e^beta)
"""
def __init__(self):
super().__init__()
self.sigmoid = nn.Sigmoid()
def forward(self, x, y, beta):
return self.sigmoid((x - y) / math.exp(beta))
class Inequality(nn.Module):
def __init__(self, out_dim=1, distribution=None, **kwargs):
super().__init__()
self.out_dim = out_dim
self.thresholds = nn.Parameter(torch.zeros(out_dim), requires_grad=True
)
self.distribution = distribution
self.normalize = Normalize(distribution)
self.cmp = SoftCmp()
self.normalize.init_thresholds(self.thresholds)
def forward(self, states, beta=0, **kwargs):
"""
:param states: [batch, length, n_agents, ... ]
"""
states_expand = states.view(*(states.size() + (1,)))
estimate_parameters = 'estimate_parameters' in kwargs and kwargs[
'estimate_parameters']
states_expand = self.normalize(states_expand, keep_data=
estimate_parameters)
return self.cmp(states_expand, self.thresholds.view(*([1] * len(
states.size()) + [self.out_dim])), beta)
def reset_parameters(self, parameter_name, name=None):
if parameter_name == 'primitive_inequality':
self.normalize.reset_parameters(name=name)
self.normalize.init_thresholds(self.thresholds)
def get_descriptions(self, name='Inequality'):
theta = self.thresholds.detach().cpu().view(self.out_dim)
        descriptions = []
for k in range(theta.size(0)):
t = self.normalize.recover_threshold(theta[k])
if 'speed' in name:
t = t * 8
if 'acc' in name:
t = t * 64
            descriptions.append('%s > %.2lf' % (name, t))
        return descriptions
class N_aryPrimitivesPredefined(nn.Module):
def __init__(self):
super().__init__()
self.out_dim = 0
self.primitive_list = []
self.ineqs = nn.ModuleDict({})
def reset_parameters(self, parameter_name):
for k in self.primitive_list:
self.ineqs[k].reset_parameters(parameter_name, name=k)
def get_descriptions(self):
descriptions = []
for k in self.primitive_list:
descriptions += self.ineqs[k].get_descriptions(name=k)
return descriptions
class AlignDifferential(nn.Module):
def __init__(self):
super().__init__()
def new_length(self, length):
return length
def forward(self, states):
"""
:param states: [batch, length, *]
"""
padded_states = torch.cat([states[:, 0:1] * 2 - states[:, 1:2],
states, states[:, -1:] * 2 - states[:, -2:-1]], dim=1)
return (padded_states[:, 2:] - padded_states[:, :-2]) / 2
def show(self, name='AlignDifferential', indent=0, log=print, **kwargs):
log(' ' * indent + '- %s(x) = AlignDifferential()' % (name,))
class UnaryPrimitivesPredefined_v2New(N_aryPrimitivesPredefined):
def __init__(self, cmp_dim=10):
super().__init__()
self.differential = AlignDifferential()
self.primitive_list = ['acc', 'pos_z', 'speed', 'dist_to_ball']
self.distance = Distance()
self.ineqs.update({'acc': Inequality(out_dim=cmp_dim, distribution=
'normal'), 'pos_z': Inequality(out_dim=cmp_dim, distribution=
'uniform'), 'speed': Inequality(out_dim=cmp_dim, distribution=
'normal'), 'dist_to_ball': Inequality(out_dim=cmp_dim,
distribution='normal')})
self.out_dim = sum([self.ineqs[k].out_dim for k in self.primitive_list]
)
def forward(self, input_0):
primals_3 = self.ineqs.acc.thresholds
primals_2 = self.ineqs.acc.normalize.param
primals_5 = self.ineqs.pos_z.thresholds
primals_4 = self.ineqs.pos_z.normalize.param
primals_7 = self.ineqs.speed.thresholds
primals_6 = self.ineqs.speed.normalize.param
primals_9 = self.ineqs.dist_to_ball.thresholds
primals_8 = self.ineqs.dist_to_ball.normalize.param
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
C-SUNSHINE/TOQ-Nets-PyTorch-Release
|
UnaryPrimitivesPredefined_v2
| false
| 17,169
|
[
"MIT"
] | 6
|
05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
https://github.com/C-SUNSHINE/TOQ-Nets-PyTorch-Release/tree/05e06bf633fb3c6b610dda9a5126ecd7af1db02f
|
FuseUnit
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/ie/ciettq2a3562jfpgfe75iig4ki2hbm6pmbwujlvp6mw26i2odufm.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 8
x0 = xindex % 16
x2 = (xindex // 128)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp6 & xmask, other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/32/c32v7egt4mupqssam3gmac2qgv3ujprjybthsgweflmot256qqw7.py
# Topologically Sorted Source Nodes: [Fcat], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# Fcat => convolution
# Graph fragment:
# %convolution : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_3, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/3c/c3cezzr3hm56fdyxh26ze3t7vwbb64jgp7ely4wbuzjw6s7cpand.py
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.reflection_pad2d]
# Source node to ATen node mapping:
# pad => _unsafe_index, _unsafe_index_1
# Graph fragment:
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_reflection_pad2d_2 = async_compile.triton('triton_poi_fused_reflection_pad2d_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6) % 6
x2 = (xindex // 36)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
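# The index arithmetic above implements ReflectionPad2d(1) on a 4x4 map: for an
# output coordinate p in 0..5, the source coordinate is 3 - |3 - |p - 1||,
# applied independently to rows and columns and flattened with stride 4.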
# kernel path: runs/run_shard_9/inductor_cache/rt/crtt7wgmsypzfqem6b65yqnnbjptbdaot3qkadlzvro4kxtms5kx.py
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_4 => convolution_4
# Graph fragment:
# %convolution_4 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_11, %primals_12, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_3 = async_compile.triton('triton_poi_fused_convolution_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_9/inductor_cache/by/cbyey6w5vaxqoh4k5sqeylvlthwneqfizkbtb4fvmfyh4jpgvgcw.py
# Topologically Sorted Source Nodes: [pad_1], Original ATen: [aten.reflection_pad2d]
# Source node to ATen node mapping:
# pad_1 => _unsafe_index_2, _unsafe_index_3
# Graph fragment:
# %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution, [None, None, %sub_5, None]), kwargs = {})
# %_unsafe_index_3 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, None, %sub_5]), kwargs = {})
triton_poi_fused_reflection_pad2d_4 = async_compile.triton('triton_poi_fused_reflection_pad2d_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8) % 8
x2 = (xindex // 64)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-2) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-2) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
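# The index arithmetic above reproduces nn.ReflectionPad2d((2, 2, 2, 2)) on a
# 4x4 spatial map: for a padded coordinate x0 in [0, 8) the source column is
# 3 - |3 - |x0 - 2||, and likewise for rows. A hedged eager-mode sketch of the
# same padding (`_reflection_pad_reference` is illustrative, not generated code):
def _reflection_pad_reference(x):
    import torch.nn.functional as F
    return F.pad(x, (2, 2, 2, 2), mode='reflect')  # (N, C, 4, 4) -> (N, C, 8, 8)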
# kernel path: runs/run_shard_9/inductor_cache/3y/c3ysdiyim5rxhvykl3y6ybcqp4o65shloq3shbm7eb4igmwh7qbk.py
# Topologically Sorted Source Nodes: [F1, F2, fusion1, fusion3, fusion5, add, add_1, fusion, clamp, mul, sub, clamp_1, mul_1, add_2], Original ATen: [aten.convolution, aten.sigmoid, aten.add, aten.div, aten.clamp, aten.mul, aten.rsub]
# Source node to ATen node mapping:
# F1 => convolution_1
# F2 => convolution_2
# add => add
# add_1 => add_1
# add_2 => add_2
# clamp => clamp_max, clamp_min
# clamp_1 => clamp_max_1, clamp_min_1
# fusion => div
# fusion1 => sigmoid
# fusion3 => sigmoid_1
# fusion5 => sigmoid_2
# mul => mul
# mul_1 => mul_1
# sub => sub_8
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_5, %primals_6, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %convolution_2 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_2, %primals_7, %primals_8, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_3,), kwargs = {})
# %sigmoid_1 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_4,), kwargs = {})
# %sigmoid_2 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_5,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sigmoid, %sigmoid_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %sigmoid_2), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_1, 3), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%div, 0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 1.0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_max, %convolution_1), kwargs = {})
# %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_8, 0), kwargs = {})
# %clamp_max_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_1, 1.0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_max_1, %convolution_2), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
triton_poi_fused_add_clamp_convolution_div_mul_rsub_sigmoid_5 = async_compile.triton('triton_poi_fused_add_clamp_convolution_div_mul_rsub_sigmoid_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_convolution_div_mul_rsub_sigmoid_5', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_convolution_div_mul_rsub_sigmoid_5(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr1 + (x3), xmask)
tmp4 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp7 = tl.sigmoid(tmp6)
tmp9 = tl.sigmoid(tmp8)
tmp10 = tmp7 + tmp9
tmp12 = tl.sigmoid(tmp11)
tmp13 = tmp10 + tmp12
tmp14 = 0.3333333333333333
tmp15 = tmp13 * tmp14
tmp16 = 0.0
tmp17 = triton_helpers.maximum(tmp15, tmp16)
tmp18 = 1.0
tmp19 = triton_helpers.minimum(tmp17, tmp18)
tmp20 = tmp19 * tmp2
tmp21 = tmp18 - tmp15
tmp22 = triton_helpers.maximum(tmp21, tmp16)
tmp23 = triton_helpers.minimum(tmp22, tmp18)
tmp24 = tmp23 * tmp5
tmp25 = tmp20 + tmp24
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(in_out_ptr1 + (x3), tmp5, xmask)
tl.store(out_ptr0 + (x3), tmp25, xmask)
''', device_str='cuda')
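# In eager PyTorch terms the fused kernel above evaluates the blend described in
# the graph fragment: average the three sigmoid attention maps, clamp, and mix
# the two projected features. A hedged sketch (`_fuse_reference` and its
# argument names are illustrative, not part of the generated module):
def _fuse_reference(F1, F2, f1x, f3x, f5x):
    import torch
    fusion = (torch.sigmoid(f1x) + torch.sigmoid(f3x) + torch.sigmoid(f5x)) / 3
    return fusion.clamp(0, 1.0) * F1 + (1 - fusion).clamp(0, 1.0) * F2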
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 8, 1, 1), (8, 1, 1, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_10, (1, ), (1, ))
assert_size_stride(primals_11, (1, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_12, (1, ), (1, ))
assert_size_stride(primals_13, (1, 4, 5, 5), (100, 25, 5, 1))
assert_size_stride(primals_14, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 512, grid=grid(512), stream=stream0)
# Topologically Sorted Source Nodes: [Fcat], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [Fcat], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_4, 256, grid=grid(256), stream=stream0)
del primals_4
# Topologically Sorted Source Nodes: [F1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(primals_1, primals_5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf9 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_2.run(buf2, buf9, 576, grid=grid(576), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, primals_11, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 1, 4, 4), (16, 16, 4, 1))
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
triton_poi_fused_convolution_3.run(buf11, primals_12, 64, grid=grid(64), stream=stream0)
del primals_12
buf12 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [pad_1], Original ATen: [aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_4.run(buf2, buf12, 1024, grid=grid(1024), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
buf13 = extern_kernels.convolution(buf12, primals_13, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 1, 4, 4), (16, 16, 4, 1))
buf14 = buf13; del buf13 # reuse
# Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
triton_poi_fused_convolution_3.run(buf14, primals_14, 64, grid=grid(64), stream=stream0)
del primals_14
# Topologically Sorted Source Nodes: [F2], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(primals_2, primals_7, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 4, 4), (64, 16, 4, 1))
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf2, primals_9, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 1, 4, 4), (16, 16, 4, 1))
buf8 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
triton_poi_fused_convolution_3.run(buf8, primals_10, 64, grid=grid(64), stream=stream0)
del primals_10
buf4 = buf3; del buf3 # reuse
buf6 = buf5; del buf5 # reuse
buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [F1, F2, fusion1, fusion3, fusion5, add, add_1, fusion, clamp, mul, sub, clamp_1, mul_1, add_2], Original ATen: [aten.convolution, aten.sigmoid, aten.add, aten.div, aten.clamp, aten.mul, aten.rsub]
triton_poi_fused_add_clamp_convolution_div_mul_rsub_sigmoid_5.run(buf4, buf6, primals_6, primals_8, buf8, buf11, buf14, buf15, 256, grid=grid(256), stream=stream0)
del primals_6
del primals_8
return (buf15, primals_1, primals_2, primals_3, primals_5, primals_7, primals_9, primals_11, primals_13, buf0, buf2, buf4, buf6, buf8, buf9, buf11, buf12, buf14, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8, 1, 1), (8, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((1, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((1, 4, 5, 5), (100, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask,
other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_reflection_pad2d_2(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_reflection_pad2d_4(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8 % 8
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-2 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-2 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_clamp_convolution_div_mul_rsub_sigmoid_5(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr1 + x3, xmask)
tmp4 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr3 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr4 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp7 = tl.sigmoid(tmp6)
tmp9 = tl.sigmoid(tmp8)
tmp10 = tmp7 + tmp9
tmp12 = tl.sigmoid(tmp11)
tmp13 = tmp10 + tmp12
tmp14 = 0.3333333333333333
tmp15 = tmp13 * tmp14
tmp16 = 0.0
tmp17 = triton_helpers.maximum(tmp15, tmp16)
tmp18 = 1.0
tmp19 = triton_helpers.minimum(tmp17, tmp18)
tmp20 = tmp19 * tmp2
tmp21 = tmp18 - tmp15
tmp22 = triton_helpers.maximum(tmp21, tmp16)
tmp23 = triton_helpers.minimum(tmp22, tmp18)
tmp24 = tmp23 * tmp5
tmp25 = tmp20 + tmp24
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(in_out_ptr1 + x3, tmp5, xmask)
tl.store(out_ptr0 + x3, tmp25, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 8, 1, 1), (8, 1, 1, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_10, (1,), (1,))
assert_size_stride(primals_11, (1, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_12, (1,), (1,))
assert_size_stride(primals_13, (1, 4, 5, 5), (100, 25, 5, 1))
assert_size_stride(primals_14, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512,
XBLOCK=256, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(256)](buf2, primals_4, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_4
buf3 = extern_kernels.convolution(primals_1, primals_5, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf9 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
triton_poi_fused_reflection_pad2d_2[grid(576)](buf2, buf9, 576,
XBLOCK=128, num_warps=4, num_stages=1)
buf10 = extern_kernels.convolution(buf9, primals_11, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 1, 4, 4), (16, 16, 4, 1))
buf11 = buf10
del buf10
triton_poi_fused_convolution_3[grid(64)](buf11, primals_12, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_12
buf12 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32
)
triton_poi_fused_reflection_pad2d_4[grid(1024)](buf2, buf12, 1024,
XBLOCK=256, num_warps=4, num_stages=1)
buf13 = extern_kernels.convolution(buf12, primals_13, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 1, 4, 4), (16, 16, 4, 1))
buf14 = buf13
del buf13
triton_poi_fused_convolution_3[grid(64)](buf14, primals_14, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_14
buf5 = extern_kernels.convolution(primals_2, primals_7, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = extern_kernels.convolution(buf2, primals_9, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 1, 4, 4), (16, 16, 4, 1))
buf8 = buf7
del buf7
triton_poi_fused_convolution_3[grid(64)](buf8, primals_10, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_10
buf4 = buf3
del buf3
buf6 = buf5
del buf5
buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_clamp_convolution_div_mul_rsub_sigmoid_5[grid(256)
](buf4, buf6, primals_6, primals_8, buf8, buf11, buf14, buf15,
256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_6
del primals_8
return (buf15, primals_1, primals_2, primals_3, primals_5, primals_7,
primals_9, primals_11, primals_13, buf0, buf2, buf4, buf6, buf8,
buf9, buf11, buf12, buf14)
class FuseUnitNew(nn.Module):
def __init__(self, channels):
super(FuseUnitNew, self).__init__()
self.proj1 = nn.Conv2d(2 * channels, channels, (1, 1))
self.proj2 = nn.Conv2d(channels, channels, (1, 1))
self.proj3 = nn.Conv2d(channels, channels, (1, 1))
self.fuse1x = nn.Conv2d(channels, 1, (1, 1), stride=1)
self.fuse3x = nn.Conv2d(channels, 1, (3, 3), stride=1)
self.fuse5x = nn.Conv2d(channels, 1, (5, 5), stride=1)
self.pad3x = nn.ReflectionPad2d((1, 1, 1, 1))
self.pad5x = nn.ReflectionPad2d((2, 2, 2, 2))
self.sigmoid = nn.Sigmoid()
def forward(self, input_0, input_1):
primals_3 = self.proj1.weight
primals_4 = self.proj1.bias
primals_5 = self.proj2.weight
primals_6 = self.proj2.bias
primals_7 = self.proj3.weight
primals_8 = self.proj3.bias
primals_9 = self.fuse1x.weight
primals_10 = self.fuse1x.bias
primals_11 = self.fuse3x.weight
primals_12 = self.fuse3x.bias
primals_13 = self.fuse5x.weight
primals_14 = self.fuse5x.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14])
return output[0]
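# A minimal usage sketch for the compiled module above (assumes a CUDA device;
# `_fuse_unit_example` is illustrative and not part of the generated code). The
# shapes mirror the assert_size_stride guards in call().
def _fuse_unit_example():
    model = FuseUnitNew(channels=4).cuda()
    a = torch.rand(4, 4, 4, 4, device='cuda')
    b = torch.rand(4, 4, 4, 4, device='cuda')
    return model(a, b)  # (4, 4, 4, 4) fused feature map blending the two inputs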
|
sugi-san/PAMA
|
FuseUnit
| false
| 13,005
|
[
"MIT"
] | 0
|
95141ebf0d3b61828a0e545f989f96b8ef569f34
|
https://github.com/sugi-san/PAMA/tree/95141ebf0d3b61828a0e545f989f96b8ef569f34
|
SACActorNetwork
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [features1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# features1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
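# In eager terms the fused kernel above adds the linear bias, applies ReLU in
# place, and records the <= 0 mask consumed later by aten.threshold_backward.
# A hedged sketch (`_relu_with_mask` is illustrative, not generated code):
def _relu_with_mask(x, b):
    import torch
    y = torch.relu(x + b)   # overwrites the mm() output buffer in the compiled path
    mask = y <= 0           # boolean mask saved for the backward pass
    return y, mask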
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [features1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_3, buf6, 256, grid=grid(256), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [features2], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf5, 256, grid=grid(256), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [a], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_7
return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0), primals_6, buf5, primals_4, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_3, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
primals_5, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (64, 4), (4, 1), 0), primals_6, buf5, primals_4, buf6
class SACActorNetworkNew(nn.Module):
def __init__(self, input_shape, output_shape, n_features, **kwargs):
super(SACActorNetworkNew, self).__init__()
n_input = input_shape[-1]
n_output = output_shape[0]
self._h1 = nn.Linear(n_input, n_features)
self._h2 = nn.Linear(n_features, n_features)
self._h3 = nn.Linear(n_features, n_output)
nn.init.xavier_uniform_(self._h1.weight, gain=nn.init.
calculate_gain('relu'))
nn.init.xavier_uniform_(self._h2.weight, gain=nn.init.
calculate_gain('relu'))
nn.init.xavier_uniform_(self._h3.weight, gain=nn.init.
calculate_gain('linear'))
def forward(self, input_0):
primals_2 = self._h1.weight
primals_3 = self._h1.bias
primals_4 = self._h2.weight
primals_5 = self._h2.bias
primals_6 = self._h3.weight
primals_7 = self._h3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
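# A minimal usage sketch for the compiled module above (assumes a CUDA device;
# `_sac_actor_example` is illustrative and not part of the generated code). The
# shapes follow the assert_size_stride guards in call().
def _sac_actor_example():
    net = SACActorNetworkNew(input_shape=(4,), output_shape=(4,), n_features=4).cuda()
    obs = torch.rand(4, 4, 4, 4, device='cuda')
    return net(obs)  # (4, 4, 4, 4) action tensor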
|
jacarvalho/mushroom-rl-benchmark
|
SACActorNetwork
| false
| 12,542
|
[
"MIT"
] | 0
|
5bc2e9b1a12be33827d6edcd5c5ad49571e11275
|
https://github.com/jacarvalho/mushroom-rl-benchmark/tree/5bc2e9b1a12be33827d6edcd5c5ad49571e11275
|
Intensity_Loss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/c2/cc2qguppytitowkggpy2ygiaspymls64fdi3ylxdpikmo7ftpwvb.py
# Topologically Sorted Source Nodes: [sub, pow_1, abs_1, mean], Original ATen: [aten.sub, aten.pow, aten.abs, aten.mean]
# Source node to ATen node mapping:
# abs_1 => abs_1
# mean => mean
# pow_1 => pow_1
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%pow_1,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%abs_1,), kwargs = {})
triton_per_fused_abs_mean_pow_sub_0 = async_compile.triton('triton_per_fused_abs_mean_pow_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_mean_pow_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_mean_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl_math.abs(tmp3)
tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = 256.0
tmp9 = tmp7 / tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp9, None)
''', device_str='cuda')
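# The reduction above is the mean of |(x - y)^2| over all 256 elements, i.e. an
# MSE-style intensity loss. A hedged eager-mode equivalent
# (`_intensity_loss_reference` is illustrative, not generated code):
def _intensity_loss_reference(gen_frames, gt_frames):
    import torch
    return torch.mean(torch.abs((gen_frames - gt_frames) ** 2))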
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, pow_1, abs_1, mean], Original ATen: [aten.sub, aten.pow, aten.abs, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_abs_mean_pow_sub_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_mean_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl_math.abs(tmp3)
tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = 256.0
tmp9 = tmp7 / tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_mean_pow_sub_0[grid(1)](buf1, arg0_1, arg1_1,
1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class Intensity_LossNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
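# A minimal usage sketch for the compiled loss above (assumes a CUDA device;
# `_intensity_loss_example` is illustrative and not part of the generated code):
def _intensity_loss_example():
    criterion = Intensity_LossNew()
    pred = torch.rand(4, 4, 4, 4, device='cuda')
    target = torch.rand(4, 4, 4, 4, device='cuda')
    return criterion(pred, target)  # scalar: mean of |(pred - target)^2|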
|
ChmarsLuo/Hero_anomaly_prediction
|
Intensity_Loss
| false
| 4,986
|
[
"Apache-2.0"
] | 1
|
dba2322dabb3476466e296db6c316fc08e0cb11d
|
https://github.com/ChmarsLuo/Hero_anomaly_prediction/tree/dba2322dabb3476466e296db6c316fc08e0cb11d
|
ConvertPointsToHomogeneous
|
import torch
import torch.nn as nn
def convert_points_to_homogeneous(points):
"""Function that converts points from Euclidean to homogeneous space.
See :class:`~torchgeometry.ConvertPointsToHomogeneous` for details.
Examples::
>>> input = torch.rand(2, 4, 3) # BxNx3
>>> output = tgm.convert_points_to_homogeneous(input) # BxNx4
"""
if not torch.is_tensor(points):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(
type(points)))
if len(points.shape) < 2:
raise ValueError('Input must be at least a 2D tensor. Got {}'.
format(points.shape))
return nn.functional.pad(points, (0, 1), 'constant', 1.0)
class ConvertPointsToHomogeneous(nn.Module):
"""Creates a transformation to convert points from Euclidean to
homogeneous space.
Args:
points (Tensor): tensor of N-dimensional points.
Returns:
Tensor: tensor of N+1-dimensional points.
Shape:
- Input: :math:`(B, D, N)` or :math:`(D, N)`
- Output: :math:`(B, D, N + 1)` or :math:`(D, N + 1)`
Examples::
>>> input = torch.rand(2, 4, 3) # BxNx3
>>> transform = tgm.ConvertPointsToHomogeneous()
>>> output = transform(input) # BxNx4
"""
def __init__(self):
super(ConvertPointsToHomogeneous, self).__init__()
def forward(self, input):
return convert_points_to_homogeneous(input)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp2 & xmask, other=1.0)
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(320)](arg0_1, buf0, 320,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
def convert_points_to_homogeneous(points):
"""Function that converts points from Euclidean to homogeneous space.
See :class:`~torchgeometry.ConvertPointsToHomogeneous` for details.
Examples::
>>> input = torch.rand(2, 4, 3) # BxNx3
>>> output = tgm.convert_points_to_homogeneous(input) # BxNx4
"""
if not torch.is_tensor(points):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(
type(points)))
if len(points.shape) < 2:
raise ValueError('Input must be at least a 2D tensor. Got {}'.
format(points.shape))
return nn.functional.pad(points, (0, 1), 'constant', 1.0)
class ConvertPointsToHomogeneousNew(nn.Module):
"""Creates a transformation to convert points from Euclidean to
homogeneous space.
Args:
points (Tensor): tensor of N-dimensional points.
Returns:
Tensor: tensor of N+1-dimensional points.
Shape:
- Input: :math:`(B, D, N)` or :math:`(D, N)`
- Output: :math:`(B, D, N + 1)` or :math:`(D, N + 1)`
Examples::
>>> input = torch.rand(2, 4, 3) # BxNx3
>>> transform = tgm.ConvertPointsToHomogeneous()
>>> output = transform(input) # BxNx4
"""
def __init__(self):
super(ConvertPointsToHomogeneousNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
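# A minimal usage sketch for the compiled module above (assumes a CUDA device;
# `_to_homogeneous_example` is illustrative and not part of the generated code).
# The compiled path pads the last dimension with a constant 1.0, matching
# nn.functional.pad(points, (0, 1), 'constant', 1.0) in the eager version.
def _to_homogeneous_example():
    points = torch.rand(4, 4, 4, 4, device='cuda')
    return ConvertPointsToHomogeneousNew()(points)  # (4, 4, 4, 5)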
|
DoJing/frankmocap
|
ConvertPointsToHomogeneous
| false
| 11,368
|
[
"BSD-3-Clause"
] | 0
|
ac2ddc5a75a885ede5068a25049ca2bfe9330576
|
https://github.com/DoJing/frankmocap/tree/ac2ddc5a75a885ede5068a25049ca2bfe9330576
|
GRUCell
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/gf/cgf3hryjsxgzb57oshiz7s6ujo5ns5gx2upact3rpcvywun7rykf.py
# Topologically Sorted Source Nodes: [g, sigmoid], Original ATen: [aten.add, aten.sigmoid]
# Source node to ATen node mapping:
# g => add
# sigmoid => sigmoid
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm, %primals_4), kwargs = {})
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add,), kwargs = {})
triton_poi_fused_add_sigmoid_1 = async_compile.triton('triton_poi_fused_add_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/jy/cjywulvj7qgkkfkp6qym4x6ye3pqbsq35iv3w3soeqljl2qyindl.py
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_1 => cat_1
# Graph fragment:
# %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %mul], -1), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((8*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.load(in_ptr2 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 * tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + (x2), tmp14, xmask)
''', device_str='cuda')
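# The kernel above builds cat([x, r * h_prev], dim=-1) in a single pass, where
# r is the first half of sigmoid(g). A hedged eager-mode sketch
# (`_gated_concat_reference` is illustrative, not generated code):
def _gated_concat_reference(x, r, h_prev):
    import torch
    return torch.cat([x, r * h_prev], dim=-1)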
# kernel path: runs/run_shard_6/inductor_cache/mt/cmtj73wllxw7xcrynbblslpmxstlea5vimzuyc6upsbov4eh2eqa.py
# Topologically Sorted Source Nodes: [c, mul_1, sub, tanh, mul_2, h], Original ATen: [aten.add, aten.mul, aten.rsub, aten.tanh]
# Source node to ATen node mapping:
# c => add_1
# h => add_2
# mul_1 => mul_1
# mul_2 => mul_2
# sub => sub
# tanh => tanh
# Graph fragment:
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_1, %primals_6), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_1, %primals_2), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %getitem_1), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_1,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %tanh), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_2), kwargs = {})
triton_poi_fused_add_mul_rsub_tanh_3 = async_compile.triton('triton_poi_fused_add_mul_rsub_tanh_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_tanh_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_rsub_tanh_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 + x0 + (8*x1)), xmask)
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp5 = tl.load(in_ptr2 + (x2), xmask)
tmp6 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp0 * tmp3
tmp7 = tmp5 + tmp6
tmp8 = libdevice.tanh(tmp7)
tmp9 = tmp2 * tmp8
tmp10 = tmp4 + tmp9
tl.store(out_ptr0 + (x2), tmp2, xmask)
tl.store(out_ptr1 + (x2), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (8, 8), (8, 1))
assert_size_stride(primals_4, (8, ), (1, ))
assert_size_stride(primals_5, (8, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0)
buf1 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [mm], Original ATen: [aten.mm]
extern_kernels.mm(buf0, primals_3, out=buf1)
del primals_3
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [g, sigmoid], Original ATen: [aten.add, aten.sigmoid]
triton_poi_fused_add_sigmoid_1.run(buf2, primals_4, 32, grid=grid(32), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(primals_1, buf2, primals_2, buf3, 32, grid=grid(32), stream=stream0)
del primals_1
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mm_1], Original ATen: [aten.mm]
extern_kernels.mm(buf3, primals_5, out=buf4)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [c, mul_1, sub, tanh, mul_2, h], Original ATen: [aten.add, aten.mul, aten.rsub, aten.tanh]
triton_poi_fused_add_mul_rsub_tanh_3.run(buf2, primals_2, buf4, primals_6, buf5, buf6, 16, grid=grid(16), stream=stream0)
return (buf6, primals_2, primals_6, buf2, buf4, buf5, reinterpret_tensor(buf3, (8, 4), (1, 8), 0), reinterpret_tensor(primals_5, (4, 8), (1, 4), 0), reinterpret_tensor(buf0, (8, 4), (1, 8), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((8, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (8 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.load(in_ptr2 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 * tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = tl.where(tmp4, tmp5, tmp13)
tl.store(out_ptr0 + x2, tmp14, xmask)
@triton.jit
def triton_poi_fused_add_mul_rsub_tanh_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask)
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp5 = tl.load(in_ptr2 + x2, xmask)
tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp0 * tmp3
tmp7 = tmp5 + tmp6
tmp8 = libdevice.tanh(tmp7)
tmp9 = tmp2 * tmp8
tmp10 = tmp4 + tmp9
tl.store(out_ptr0 + x2, tmp2, xmask)
tl.store(out_ptr1 + x2, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (8, 8), (8, 1))
assert_size_stride(primals_4, (8,), (1,))
assert_size_stride(primals_5, (8, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
extern_kernels.mm(buf0, primals_3, out=buf1)
del primals_3
buf2 = buf1
del buf1
triton_poi_fused_add_sigmoid_1[grid(32)](buf2, primals_4, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
triton_poi_fused_cat_2[grid(32)](primals_1, buf2, primals_2, buf3,
32, XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf3, primals_5, out=buf4)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_rsub_tanh_3[grid(16)](buf2, primals_2,
buf4, primals_6, buf5, buf6, 16, XBLOCK=16, num_warps=1,
num_stages=1)
return buf6, primals_2, primals_6, buf2, buf4, buf5, reinterpret_tensor(
buf3, (8, 4), (1, 8), 0), reinterpret_tensor(primals_5, (4, 8), (1,
4), 0), reinterpret_tensor(buf0, (8, 4), (1, 8), 0)
class GRUCellNew(nn.Module):
def __init__(self, input_size, hidden_size):
super(GRUCellNew, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self._W = nn.Parameter(torch.FloatTensor(input_size + hidden_size,
2 * hidden_size))
self._W_b = nn.Parameter(torch.FloatTensor(2 * hidden_size))
self._U = nn.Parameter(torch.FloatTensor(input_size + hidden_size,
hidden_size))
self._U_b = nn.Parameter(torch.FloatTensor(hidden_size))
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self._W.data)
nn.init.xavier_uniform_(self._U.data)
nn.init.constant_(self._W_b.data, 0)
nn.init.constant_(self._U_b.data, 0)
def forward(self, input_0, input_1):
primals_3 = self._W
primals_4 = self._W_b
primals_5 = self._U
primals_6 = self._U_b
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
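# --- Hypothetical usage sketch (not part of the generated module) ---
# A minimal smoke test, assuming a CUDA device is available. The shapes mirror
# the asserts in call(): both the input and the previous hidden state are
# (4, 4) tensors, i.e. input_size == hidden_size == 4 with a batch of 4.
if __name__ == "__main__":
    cell = GRUCellNew(input_size=4, hidden_size=4).cuda()
    x = torch.rand(4, 4, device='cuda')   # current input
    h = torch.rand(4, 4, device='cuda')   # previous hidden state
    h_next = cell(x, h)                   # updated hidden state, shape (4, 4)
    print(h_next.shape)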
|
Avmb/lm-robustness | GRUCell | false | 115 | ["BSD-3-Clause"] | 0 | b5417d9aac01bff0d2a56b506eabed899fd718d4 | https://github.com/Avmb/lm-robustness/tree/b5417d9aac01bff0d2a56b506eabed899fd718d4 |
BertSelfAttention
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/x2/cx2hdvwyo7m5jvhhvtugzxqvmy6z4nsfhkkjhvgzbbm3cb6dsum2.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_scalar : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%permute_default, 1.0), kwargs = {})
# %clone_default : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/5j/c5jll3kxtd32cl7pwubrb5oky2mtzckfgip2xbwad7crvvp4zk4r.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_default_2, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_default_2, %amax_default), kwargs = {})
# %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {})
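# Note (added): this is the numerically stable softmax numerator. The row-wise
# max is subtracted before exponentiating so exp() cannot overflow; the
# division by the row sum happens in the next kernel.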
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/kt/cktnex5febczl2ac6zugjmcksgsd5kjdufazv65vtepuwob3cb7a.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %sum_dim_int_list : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_default, [-1], True), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_default, %sum_dim_int_list), kwargs = {})
# %eq_scalar : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%view_default_2, -inf), kwargs = {})
# %logical_not_default : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq_scalar,), kwargs = {})
# %any_dim : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not_default, -1, True), kwargs = {})
# %logical_not_default_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_dim,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_default_1, %full_default, %div_tensor), kwargs = {})
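# Note (added): softmax denominator plus a guard for fully masked rows. Besides
# dividing by the row sum, the kernel checks whether every logit in a row equals
# -inf; such rows would otherwise produce NaN (0 / 0), so where() writes zeros there.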
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr1 + (x2), xmask)
tmp26 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = float("-inf")
tmp2 = tmp0 == tmp1
tmp3 = tmp2 == 0
tmp4 = tmp3.to(tl.int64)
tmp5 = (tmp4 != 0)
tmp7 = tmp6 == tmp1
tmp8 = tmp7 == 0
tmp9 = tmp8.to(tl.int64)
tmp10 = (tmp9 != 0)
tmp11 = tmp5 | tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13 == 0
tmp15 = tmp14.to(tl.int64)
tmp16 = (tmp15 != 0)
tmp17 = tmp11 | tmp16
tmp19 = tmp18 == tmp1
tmp20 = tmp19 == 0
tmp21 = tmp20.to(tl.int64)
tmp22 = (tmp21 != 0)
tmp23 = tmp17 | tmp22
tmp24 = tmp23 == 0
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp32 = tmp30 + tmp31
tmp33 = tmp25 / tmp32
tmp34 = 0.0
tmp35 = tl.where(tmp24, tmp34, tmp33)
tl.store(out_ptr0 + (x2), tmp35, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/vv/cvvnhithjvmvhfjufxwwzclfobkrgbyyteg66hp24r675f7elw4c.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %clone_default_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py
# Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# context_layer_1 => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(buf0, primals_2, buf3, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_0.run(buf1, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(buf5, buf6, buf7, 256, grid=grid(256), stream=stream0)
del buf5
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(buf2, primals_7, buf8, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_7
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf9, buf10, 16, 4, grid=grid(16, 4), stream=stream0)
del buf9
return (reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr1 + x2, xmask)
tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = float('-inf')
tmp2 = tmp0 == tmp1
tmp3 = tmp2 == 0
tmp4 = tmp3.to(tl.int64)
tmp5 = tmp4 != 0
tmp7 = tmp6 == tmp1
tmp8 = tmp7 == 0
tmp9 = tmp8.to(tl.int64)
tmp10 = tmp9 != 0
tmp11 = tmp5 | tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13 == 0
tmp15 = tmp14.to(tl.int64)
tmp16 = tmp15 != 0
tmp17 = tmp11 | tmp16
tmp19 = tmp18 == tmp1
tmp20 = tmp19 == 0
tmp21 = tmp20.to(tl.int64)
tmp22 = tmp21 != 0
tmp23 = tmp17 | tmp22
tmp24 = tmp23 == 0
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp32 = tmp30 + tmp31
tmp33 = tmp25 / tmp32
tmp34 = 0.0
tmp35 = tl.where(tmp24, tmp34, tmp33)
tl.store(out_ptr0 + x2, tmp35, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_1[grid(256)](buf5, buf6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_2[grid(256)](buf5, buf6, buf7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf5
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf1
triton_poi_fused_3[grid(16, 4)](buf2, primals_7, buf8, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del primals_7
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_4[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
del buf9
return reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)
class BertSelfAttentionNew(nn.Module):
def __init__(self, config):
super(BertSelfAttentionNew, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, input_0):
primals_1 = self.query.weight
primals_2 = self.query.bias
primals_4 = self.key.weight
primals_5 = self.key.bias
primals_6 = self.value.weight
primals_7 = self.value.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
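# --- Hypothetical usage sketch (not part of the generated module) ---
# A minimal smoke test, assuming a CUDA device is available. The shapes follow
# the asserts in call(): hidden_size 4 and a (4, 4, 4) input, i.e.
# (batch, seq_len, hidden). num_attention_heads=4 is an assumption that matches
# the 1.0 scaling baked into triton_poi_fused_0 (head size 1); the dropout
# probability is arbitrary because this compiled graph applies no dropout.
if __name__ == "__main__":
    from types import SimpleNamespace
    config = SimpleNamespace(hidden_size=4, num_attention_heads=4,
        attention_probs_dropout_prob=0.0)
    attn = BertSelfAttentionNew(config).cuda()
    hidden_states = torch.rand(4, 4, 4, device='cuda')
    context = attn(hidden_states)   # context layer of shape (4, 4, 4)
    print(context.shape)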
|
McGill-NLP/imagecode | BertSelfAttention | false | 5,724 | ["MIT"] | 1 | 2c636c6c41d705b4c5861841f29ff689748113d1 | https://github.com/McGill-NLP/imagecode/tree/2c636c6c41d705b4c5861841f29ff689748113d1 |
Embbed2
|
import torch
class Embbed2(torch.nn.Module):
def __init__(self, in_features, out_features, weight_multiplier=1.0):
super(Embbed2, self).__init__()
self.b = 2.0 ** torch.linspace(0, weight_multiplier, out_features //
in_features) - 1
self.b = torch.nn.Parameter(torch.reshape(torch.eye(in_features) *
self.b[:, None, None], [out_features, in_features]))
self.osize = out_features
self.a = torch.nn.Parameter(torch.ones(out_features))
def forward(self, x):
x = torch.matmul(x, self.b.T)
return torch.cat([self.a * torch.sin(x), self.a * torch.cos(x)], dim=-1
)
def output_size(self):
return 2 * self.osize
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
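# Note (added): this resembles a Fourier-feature / NeRF-style positional
# embedding: x is projected by the frequency matrix b and mapped to
# cat([a * sin(x @ b.T), a * cos(x @ b.T)]), doubling the feature dimension.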
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tl.load(in_ptr1 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tl_math.sin(tmp6)
tmp8 = tmp5 * tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp14 = tl.load(in_ptr0 + (-4 + x0), tmp11 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tl_math.cos(tmp15)
tmp17 = tmp14 * tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp11, tmp17, tmp18)
tmp20 = tl.where(tmp4, tmp10, tmp19)
tl.store(out_ptr0 + x2, tmp20, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_3, buf0, buf1, 512,
XBLOCK=128, num_warps=4, num_stages=1)
return buf1, primals_3, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), buf0
class Embbed2New(torch.nn.Module):
def __init__(self, in_features, out_features, weight_multiplier=1.0):
super(Embbed2New, self).__init__()
self.b = 2.0 ** torch.linspace(0, weight_multiplier, out_features //
in_features) - 1
self.b = torch.nn.Parameter(torch.reshape(torch.eye(in_features) *
self.b[:, None, None], [out_features, in_features]))
self.osize = out_features
self.a = torch.nn.Parameter(torch.ones(out_features))
def output_size(self):
return 2 * self.osize
def forward(self, input_0):
primals_1 = self.b
primals_3 = self.a
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
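# --- Hypothetical usage sketch (not part of the generated module) ---
# A minimal smoke test, assuming a CUDA device is available, mirroring
# get_inputs() / get_init_inputs() from the original module: in_features=4,
# out_features=4 and a (4, 4, 4, 4) input. The last dimension doubles to 8
# because sin and cos features are concatenated.
if __name__ == "__main__":
    emb = Embbed2New(in_features=4, out_features=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = emb(x)                      # shape (4, 4, 4, 8)
    print(y.shape, emb.output_size())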
|
qway/nerfmeshes | Embbed2 | false | 16,309 | ["MIT"] | 113 | d983dcbbcfec1337c9f2040969213c6d1ea0c39e | https://github.com/qway/nerfmeshes/tree/d983dcbbcfec1337c9f2040969213c6d1ea0c39e |
TransformerEncoderLayer
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/qw/cqw7yoyglmtjad3kirznl5odetqfs3k6pjtnfdbzklyhsdvuvgft.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# multi_head_attention_forward => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_3, 1.0), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/hz/chzi3aam26mikdhljz5x7jlqazm7kpktzeptsf36thgfhsg7ub6a.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/em/cem6qbxwbiqnjqybzk5arf2obt5uggy4qs7otwwpovvnrhvdc6h4.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/rh/crhjfwyl6xoj5ylcsbbh6lp2vlegits2zkdej3b3wb2q4ddfnejv.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# multi_head_attention_forward => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/5z/c5zy7julai2lhuinuwjgyl62nx7cyws6ni5poe5jzp7qn532rcgh.py
# Topologically Sorted Source Nodes: [src], Original ATen: [aten.add]
# Source node to ATen node mapping:
# src => add
# Graph fragment:
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %squeeze), kwargs = {})
triton_poi_fused_add_4 = async_compile.triton('triton_poi_fused_add_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/p5/cp5zfsohdolod3ikxheo5zxosgk7sw3fpun4wu4lt34g7uwo2pdt.py
# Topologically Sorted Source Nodes: [leaky_relu], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# leaky_relu => gt, mul_1, where
# Graph fragment:
# %add_tensor_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_7), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor_1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor_1, True), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_tensor_1, %mul_1), kwargs = {})
triton_poi_fused_leaky_relu_5 = async_compile.triton('triton_poi_fused_leaky_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 1.0
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (16, 4), (4, 1))
assert_size_stride(primals_7, (16, ), (1, ))
assert_size_stride(primals_8, (4, 16), (16, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_3, (4, ), (1, ), 4), primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_3, (4, ), (1, ), 8), primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf2)
del primals_2
buf3 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(buf3, primals_3, 16, grid=grid(16), stream=stream0)
del primals_3
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (4, 1, 4), (1, 1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf4, buf5, 64, grid=grid(64), stream=stream0)
buf6 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf5, buf6, 64, grid=grid(64), stream=stream0)
buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 1), (1, 4, 1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf7, buf8, 4, 4, grid=grid(4, 4), stream=stream0)
buf9 = reinterpret_tensor(buf7, (4, 4), (4, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf8, (4, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf9)
buf10 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [src], Original ATen: [aten.add]
triton_poi_fused_add_4.run(buf10, primals_1, primals_5, 16, grid=grid(16), stream=stream0)
del primals_5
buf11 = reinterpret_tensor(buf5, (4, 16), (16, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf10, reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), out=buf11)
buf12 = empty_strided_cuda((4, 16), (16, 1), torch.bool)
buf13 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [leaky_relu], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_5.run(buf11, primals_7, buf12, buf13, 64, grid=grid(64), stream=stream0)
del buf11
del primals_7
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf13, reinterpret_tensor(primals_8, (16, 4), (1, 16), 0), out=buf14)
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [src_1], Original ATen: [aten.add]
triton_poi_fused_add_4.run(buf15, buf10, primals_9, 16, grid=grid(16), stream=stream0)
del primals_9
return (buf15, primals_1, buf6, reinterpret_tensor(buf8, (4, 4), (4, 1), 0), buf10, buf12, buf13, primals_8, primals_6, primals_4, reinterpret_tensor(buf2, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 1), (1, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
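# triton_poi_fused_mul_0 adds the query-projection bias and applies the attention
# scaling factor. With d_model 4 split across 4 heads the head dimension is 1, so the
# 1/sqrt(head_dim) scale is exactly 1.0, which is why the kernel multiplies by the
# constant 1.0. (This note is editorial, inferred from the buffer shapes below.)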
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
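# Together, triton_poi_fused__softmax_1 and _2 implement a numerically stable
# row-wise softmax over the last dimension of the (4, 4, 4) attention scores:
# the first kernel computes exp(x - rowmax), the second divides by the row sum.
# A minimal eager-mode reference (assumed equivalent; helper name is illustrative):
def _softmax_reference(scores):
    shifted = scores - scores.max(dim=-1, keepdim=True).values
    exp = shifted.exp()
    return exp / exp.sum(dim=-1, keepdim=True)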
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 1.0
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
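# triton_poi_fused_leaky_relu_5 fuses the linear1 bias add with the activation and
# also emits the (y > 0) mask saved for backward. The slope constant is 1.0 because
# the module is built with nn.LeakyReLU(True) (True == 1), so the activation is
# effectively the identity. A hedged reference (helper name is illustrative only):
def _bias_leaky_relu_reference(x, bias, negative_slope=1.0):
    y = x + bias
    mask = y > 0
    return mask, torch.where(mask, y, y * negative_slope)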
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (16, 4), (4, 1))
assert_size_stride(primals_7, (16,), (1,))
assert_size_stride(primals_8, (4, 16), (16, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4),
(1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_3, (4,), (1,), 4),
primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 16),
alpha=1, beta=1, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_3, (4,), (1,), 8),
primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 32),
alpha=1, beta=1, out=buf2)
del primals_2
buf3 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](buf3, primals_3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_3
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (4, 1, 4), (1, 1,
4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused__softmax_2[grid(64)](buf5, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 1), (1, 4,
1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_clone_3[grid(4, 4)](buf7, buf8, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf7, (4, 4), (4, 1), 0)
del buf7
extern_kernels.mm(reinterpret_tensor(buf8, (4, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf9)
buf10 = buf9
del buf9
triton_poi_fused_add_4[grid(16)](buf10, primals_1, primals_5, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf11 = reinterpret_tensor(buf5, (4, 16), (16, 1), 0)
del buf5
extern_kernels.mm(buf10, reinterpret_tensor(primals_6, (4, 16), (1,
4), 0), out=buf11)
buf12 = empty_strided_cuda((4, 16), (16, 1), torch.bool)
buf13 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
triton_poi_fused_leaky_relu_5[grid(64)](buf11, primals_7, buf12,
buf13, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf11
del primals_7
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf13, reinterpret_tensor(primals_8, (16, 4), (1,
16), 0), out=buf14)
buf15 = buf14
del buf14
triton_poi_fused_add_4[grid(16)](buf15, buf10, primals_9, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_9
return (buf15, primals_1, buf6, reinterpret_tensor(buf8, (4, 4), (4, 1),
0), buf10, buf12, buf13, primals_8, primals_6, primals_4,
reinterpret_tensor(buf2, (4, 1, 4), (1, 1, 4), 0),
reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0),
reinterpret_tensor(buf1, (4, 4, 1), (1, 4, 1), 0))
class TransformerEncoderLayerNew(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=16, dropout=0):
super(TransformerEncoderLayerNew, self).__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = nn.LeakyReLU(True)
def forward(self, input_0):
primals_2 = self.self_attn.in_proj_weight
primals_3 = self.self_attn.in_proj_bias
primals_1 = self.self_attn.out_proj.weight
primals_5 = self.self_attn.out_proj.bias
primals_6 = self.linear1.weight
primals_7 = self.linear1.bias
primals_8 = self.linear2.weight
primals_9 = self.linear2.bias
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
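def _example_usage():
    # Hedged usage sketch, not from the original repository: shapes follow the
    # size/stride asserts in call(); nhead=4 is an assumption inferred from the
    # per-head (head_dim 1) buffers above.
    layer = TransformerEncoderLayerNew(d_model=4, nhead=4).cuda()
    src = torch.rand(4, 4, device='cuda')  # unbatched (seq_len, d_model) input
    return layer(src)  # tensor of shape (4, 4)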
|
imperial-qore/CAROL
|
TransformerEncoderLayer
| false
| 6,872
|
[
"BSD-3-Clause"
] | 1
|
57dc42c4ddeb9e75eed43a91ceb336a1ecc9c8b9
|
https://github.com/imperial-qore/CAROL/tree/57dc42c4ddeb9e75eed43a91ceb336a1ecc9c8b9
|
FCBottleNeck
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/zy/czylxf6rfbnbz2ddgd3xovxwjqnfen7sgqej5mnv46j2fekwnniz.py
# Topologically Sorted Source Nodes: [x_pe], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_pe => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2048, 4), (4, 1))
assert_size_stride(primals_3, (2048, ), (1, ))
assert_size_stride(primals_4, (2048, 2048), (2048, 1))
assert_size_stride(primals_5, (2048, ), (1, ))
assert_size_stride(primals_6, (4, 2048), (2048, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2048), (2048, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 2048), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 2048), (32768, 8192, 2048, 1), 0); del buf0 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_pe], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_3, buf6, 131072, grid=grid(131072), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((64, 2048), (2048, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 2048), (2048, 1), 0), reinterpret_tensor(primals_4, (2048, 2048), (1, 2048), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 2048), (32768, 8192, 2048, 1), 0); del buf2 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_pe_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf5, 131072, grid=grid(131072), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_pe_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 2048), (2048, 1), 0), reinterpret_tensor(primals_6, (2048, 4), (1, 2048), 0), alpha=1, beta=1, out=buf4)
del primals_7
return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 2048), (2048, 1), 0), reinterpret_tensor(buf3, (64, 2048), (2048, 1), 0), primals_6, buf5, primals_4, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((2048, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((2048, 2048), (2048, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 2048), (2048, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
from torch import nn
import torch
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
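# The kernel above fuses the linear bias add with ReLU and also materializes the
# (activation <= 0) mask that threshold_backward needs, so the forward value and the
# saved backward mask come from a single memory pass over the 131072 elements.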
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2048, 4), (4, 1))
assert_size_stride(primals_3, (2048,), (1,))
assert_size_stride(primals_4, (2048, 2048), (2048, 1))
assert_size_stride(primals_5, (2048,), (1,))
assert_size_stride(primals_6, (4, 2048), (2048, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2048), (2048, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 2048), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 2048), (32768, 8192, 2048,
1), 0)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(131072)](buf1,
primals_3, buf6, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 2048), (2048, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 2048), (2048, 1), 0
), reinterpret_tensor(primals_4, (2048, 2048), (1, 2048), 0),
out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 2048), (32768, 8192, 2048,
1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(131072)](buf3,
primals_5, buf5, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 2048),
(2048, 1), 0), reinterpret_tensor(primals_6, (2048, 4), (1,
2048), 0), alpha=1, beta=1, out=buf4)
del primals_7
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 2048), (2048, 1), 0
), reinterpret_tensor(buf3, (64, 2048), (2048, 1), 0
), primals_6, buf5, primals_4, buf6
class FCBottleNeckNew(nn.Module):
def __init__(self, InFeatureSize):
super().__init__()
self.FC1 = nn.Linear(InFeatureSize, 2048)
self.FC2 = nn.Linear(2048, 2048)
self.FC3 = nn.Linear(2048, InFeatureSize)
def forward(self, input_0):
primals_2 = self.FC1.weight
primals_3 = self.FC1.bias
primals_4 = self.FC2.weight
primals_5 = self.FC2.bias
primals_6 = self.FC3.weight
primals_7 = self.FC3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
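def _example_usage():
    # Hedged usage sketch, not from the original repository: the size asserts in
    # call() expect a (4, 4, 4, 4) input, hence InFeatureSize=4 here.
    net = FCBottleNeckNew(InFeatureSize=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    return net(x)  # tensor of shape (4, 4, 4, 4)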
|
brown-ivl/beacon
|
FCBottleNeck
| false
| 6,374
|
[
"MIT"
] | 1
|
66a1714473b362294f787f261561e39c52f00e42
|
https://github.com/brown-ivl/beacon/tree/66a1714473b362294f787f261561e39c52f00e42
|
AlphaMish
|
import torch
class AlphaMish(torch.nn.Module):
def __init__(self, in_features):
super().__init__()
self.alpha = torch.nn.Parameter(torch.zeros((in_features, 1, 1)))
self.alpha.requires_grad = True
def forward(self, x):
return torch.mul(x, torch.tanh(torch.mul(1 + torch.nn.functional.
softplus(self.alpha), torch.nn.functional.softplus(x))))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_softplus_tanh_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = 20.0
tmp3 = tmp1 > tmp2
tmp4 = tl_math.exp(tmp1)
tmp5 = libdevice.log1p(tmp4)
tmp6 = tl.where(tmp3, tmp1, tmp5)
tmp7 = 1.0
tmp8 = tmp6 + tmp7
tmp9 = tmp0 > tmp2
tmp10 = tl_math.exp(tmp0)
tmp11 = libdevice.log1p(tmp10)
tmp12 = tl.where(tmp9, tmp0, tmp11)
tmp13 = tmp8 * tmp12
tmp14 = libdevice.tanh(tmp13)
tmp15 = tmp0 * tmp14
tl.store(out_ptr0 + x3, tmp15, xmask)
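# The kernel above fuses AlphaMish into a single pass:
#   out = x * tanh((1 + softplus(alpha)) * softplus(x)),
# using the usual softplus(v) ~= v shortcut for v > 20 to avoid exp overflow.
# A hedged eager-mode reference (helper name is illustrative only):
def _alpha_mish_reference(x, alpha):
    import torch.nn.functional as F
    return x * torch.tanh((1 + F.softplus(alpha)) * F.softplus(x))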
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_softplus_tanh_0[grid(256)](primals_2,
primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
return buf0, primals_1, primals_2
class AlphaMishNew(torch.nn.Module):
def __init__(self, in_features):
super().__init__()
self.alpha = torch.nn.Parameter(torch.zeros((in_features, 1, 1)))
self.alpha.requires_grad = True
def forward(self, input_0):
primals_1 = self.alpha
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
mattroz/yatopi
|
AlphaMish
| false
| 3,991
|
[
"MIT"
] | 0
|
278bac6f3d2f13916ae9d43309b9f38b608426bd
|
https://github.com/mattroz/yatopi/tree/278bac6f3d2f13916ae9d43309b9f38b608426bd
|
GlobalAttention
|
import torch
import torch.nn as nn
def aeq(*args):
"""
Assert all arguments have the same value
"""
arguments = (arg for arg in args)
first = next(arguments)
assert all(arg == first for arg in arguments
), 'Not all arguments have the same value: ' + str(args)
class Bottle(nn.Module):
def forward(self, input):
if len(input.size()) <= 2:
return super(Bottle, self).forward(input)
size = input.size()[:2]
out = super(Bottle, self).forward(input.view(size[0] * size[1], -1))
return out.contiguous().view(size[0], size[1], -1)
class BottleLinear(Bottle, nn.Linear):
pass
class GlobalAttention(nn.Module):
"""
Luong Attention.
Global attention takes a matrix and a query vector. It
then computes a parameterized convex combination of the matrix
based on the input query.
H_1 H_2 H_3 ... H_n
q q q q
| | | |
\\ | | /
.....
\\ | /
a
Constructs a unit mapping.
$$(H_1 + H_n, q) => (a)$$
Where H is of `batch x n x dim` and q is of `batch x dim`.
Luong Attention (dot, general):
The full function is
    $$\\tanh(W_2 [(softmax((W_1 q + b_1) H) H), q] + b_2)$$.
* dot: $$score(h_t,{\\overline{h}}_s) = h_t^T{\\overline{h}}_s$$
* general: $$score(h_t,{\\overline{h}}_s) = h_t^T W_a {\\overline{h}}_s$$
Bahdanau Attention (mlp):
    $$c = \\sum_{j=1}^{SeqLength} a_j h_j$$.
The Alignment-function $$a$$ computes an alignment as:
    $$a_j = softmax(v_a^T \\tanh(W_a q + U_a h_j))$$.
"""
def __init__(self, dim, is_transform_out, attn_type='dot', attn_hidden=0):
super(GlobalAttention, self).__init__()
self.dim = dim
self.attn_type = attn_type
self.attn_hidden = attn_hidden
assert self.attn_type in ['dot', 'general', 'mlp'
], 'Please select a valid attention type.'
if attn_hidden > 0:
self.transform_in = nn.Sequential(nn.Linear(dim, attn_hidden),
nn.ELU(0.1))
if self.attn_type == 'general':
d = attn_hidden if attn_hidden > 0 else dim
self.linear_in = nn.Linear(d, d, bias=False)
elif self.attn_type == 'mlp':
self.linear_context = BottleLinear(dim, dim, bias=False)
self.linear_query = nn.Linear(dim, dim, bias=True)
self.v = BottleLinear(dim, 1, bias=False)
out_bias = self.attn_type == 'mlp'
if is_transform_out:
self.linear_out = nn.Linear(dim * 2, dim, bias=out_bias)
else:
self.linear_out = None
self.sm = nn.Softmax()
self.tanh = nn.Tanh()
self.mask = None
def applyMask(self, mask):
self.mask = mask
def applyMaskBySeqBatch(self, q):
self.applyMask(q.data.eq(table.IO.PAD).t().contiguous().unsqueeze(0))
def score(self, h_t, h_s):
"""
h_t (FloatTensor): batch x tgt_len x dim
h_s (FloatTensor): batch x src_len x dim
returns scores (FloatTensor): batch x tgt_len x src_len:
raw attention scores for each src index
"""
src_batch, src_len, src_dim = h_s.size()
tgt_batch, tgt_len, tgt_dim = h_t.size()
aeq(src_batch, tgt_batch)
aeq(src_dim, tgt_dim)
aeq(self.dim, src_dim)
if self.attn_type in ['general', 'dot']:
if self.attn_hidden > 0:
h_t = self.transform_in(h_t)
h_s = self.transform_in(h_s)
if self.attn_type == 'general':
h_t = self.linear_in(h_t)
h_s_ = h_s.transpose(1, 2)
return torch.bmm(h_t, h_s_)
else:
dim = self.dim
wq = self.linear_query(h_t.view(-1, dim))
wq = wq.view(tgt_batch, tgt_len, 1, dim)
wq = wq.expand(tgt_batch, tgt_len, src_len, dim)
uh = self.linear_context(h_s.contiguous().view(-1, dim))
uh = uh.view(src_batch, 1, src_len, dim)
uh = uh.expand(src_batch, tgt_len, src_len, dim)
wquh = self.tanh(wq + uh)
return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len)
def forward(self, input, context):
"""
input (FloatTensor): batch x tgt_len x dim: decoder's rnn's output.
context (FloatTensor): batch x src_len x dim: src hidden states
"""
if input.dim() == 2:
one_step = True
input = input.unsqueeze(1)
else:
one_step = False
batch, sourceL, dim = context.size()
batch_, targetL, dim_ = input.size()
aeq(batch, batch_)
aeq(dim, dim_)
aeq(self.dim, dim)
if self.mask is not None:
beam_, batch_, sourceL_ = self.mask.size()
aeq(batch, batch_ * beam_)
aeq(sourceL, sourceL_)
align = self.score(input, context)
if self.mask is not None:
mask_ = self.mask.view(batch, 1, sourceL)
align.data.masked_fill_(mask_, -float('inf'))
align_vectors = self.sm(align.view(batch * targetL, sourceL))
align_vectors = align_vectors.view(batch, targetL, sourceL)
c = torch.bmm(align_vectors, context)
concat_c = torch.cat([c, input], 2)
if self.linear_out is None:
attn_h = concat_c
else:
attn_h = self.linear_out(concat_c)
if self.attn_type in ['general', 'dot']:
attn_h = self.tanh(attn_h)
if one_step:
attn_h = attn_h.squeeze(1)
align_vectors = align_vectors.squeeze(1)
batch_, dim_ = attn_h.size()
aeq(batch, batch_)
batch_, sourceL_ = align_vectors.size()
aeq(batch, batch_)
aeq(sourceL, sourceL_)
else:
attn_h = attn_h.transpose(0, 1).contiguous()
align_vectors = align_vectors.transpose(0, 1).contiguous()
targetL_, batch_, dim_ = attn_h.size()
aeq(targetL, targetL_)
aeq(batch, batch_)
targetL_, batch_, sourceL_ = align_vectors.size()
aeq(targetL, targetL_)
aeq(batch, batch_)
aeq(sourceL, sourceL_)
return attn_h, align_vectors
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4, 'is_transform_out': 4}]
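def _dot_attention_reference(query, context, linear_out):
    # Hedged eager-mode sketch of the 'dot' attention path above (helper name and
    # signature are illustrative, not from the repository): raw scores, softmax over
    # the source length, convex combination, then tanh(linear_out([c; q])).
    align = torch.softmax(torch.bmm(query, context.transpose(1, 2)), dim=-1)
    c = torch.bmm(align, context)
    attn_h = torch.tanh(linear_out(torch.cat([c, query], dim=2)))
    return attn_h, align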
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(out_ptr0 + x3, tmp1, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(primals_1, reinterpret_tensor(primals_2, (4, 4,
4), (16, 1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0)
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1),
0), primals_2, out=buf3)
del primals_2
buf4 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
triton_poi_fused_cat_2[grid(128)](buf3, primals_1, buf4, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf5 = reinterpret_tensor(buf3, (16, 4), (4, 1), 0)
del buf3
extern_kernels.mm(reinterpret_tensor(buf4, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf5)
del primals_3
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_3[grid(64)](buf5, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(64)](buf2, buf7, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf2
return buf6, buf7, reinterpret_tensor(buf4, (16, 8), (8, 1), 0), buf5
def aeq(*args):
"""
Assert all arguments have the same value
"""
arguments = (arg for arg in args)
first = next(arguments)
assert all(arg == first for arg in arguments
), 'Not all arguments have the same value: ' + str(args)
class Bottle(nn.Module):
def forward(self, input):
if len(input.size()) <= 2:
return super(Bottle, self).forward(input)
size = input.size()[:2]
out = super(Bottle, self).forward(input.view(size[0] * size[1], -1))
return out.contiguous().view(size[0], size[1], -1)
class BottleLinear(Bottle, nn.Linear):
pass
class GlobalAttentionNew(nn.Module):
"""
Luong Attention.
Global attention takes a matrix and a query vector. It
then computes a parameterized convex combination of the matrix
based on the input query.
H_1 H_2 H_3 ... H_n
q q q q
| | | |
\\ | | /
.....
\\ | /
a
Constructs a unit mapping.
$$(H_1 + H_n, q) => (a)$$
Where H is of `batch x n x dim` and q is of `batch x dim`.
Luong Attention (dot, general):
The full function is
    $$\\tanh(W_2 [(softmax((W_1 q + b_1) H) H), q] + b_2)$$.
* dot: $$score(h_t,{\\overline{h}}_s) = h_t^T{\\overline{h}}_s$$
* general: $$score(h_t,{\\overline{h}}_s) = h_t^T W_a {\\overline{h}}_s$$
Bahdanau Attention (mlp):
    $$c = \\sum_{j=1}^{SeqLength} a_j h_j$$.
The Alignment-function $$a$$ computes an alignment as:
    $$a_j = softmax(v_a^T \\tanh(W_a q + U_a h_j))$$.
"""
def __init__(self, dim, is_transform_out, attn_type='dot', attn_hidden=0):
super(GlobalAttentionNew, self).__init__()
self.dim = dim
self.attn_type = attn_type
self.attn_hidden = attn_hidden
assert self.attn_type in ['dot', 'general', 'mlp'
], 'Please select a valid attention type.'
if attn_hidden > 0:
self.transform_in = nn.Sequential(nn.Linear(dim, attn_hidden),
nn.ELU(0.1))
if self.attn_type == 'general':
d = attn_hidden if attn_hidden > 0 else dim
self.linear_in = nn.Linear(d, d, bias=False)
elif self.attn_type == 'mlp':
self.linear_context = BottleLinear(dim, dim, bias=False)
self.linear_query = nn.Linear(dim, dim, bias=True)
self.v = BottleLinear(dim, 1, bias=False)
out_bias = self.attn_type == 'mlp'
if is_transform_out:
self.linear_out = nn.Linear(dim * 2, dim, bias=out_bias)
else:
self.linear_out = None
self.sm = nn.Softmax()
self.tanh = nn.Tanh()
self.mask = None
def applyMask(self, mask):
self.mask = mask
def applyMaskBySeqBatch(self, q):
self.applyMask(q.data.eq(table.IO.PAD).t().contiguous().unsqueeze(0))
def score(self, h_t, h_s):
"""
h_t (FloatTensor): batch x tgt_len x dim
h_s (FloatTensor): batch x src_len x dim
returns scores (FloatTensor): batch x tgt_len x src_len:
raw attention scores for each src index
"""
src_batch, src_len, src_dim = h_s.size()
tgt_batch, tgt_len, tgt_dim = h_t.size()
aeq(src_batch, tgt_batch)
aeq(src_dim, tgt_dim)
aeq(self.dim, src_dim)
if self.attn_type in ['general', 'dot']:
if self.attn_hidden > 0:
h_t = self.transform_in(h_t)
h_s = self.transform_in(h_s)
if self.attn_type == 'general':
h_t = self.linear_in(h_t)
h_s_ = h_s.transpose(1, 2)
return torch.bmm(h_t, h_s_)
else:
dim = self.dim
wq = self.linear_query(h_t.view(-1, dim))
wq = wq.view(tgt_batch, tgt_len, 1, dim)
wq = wq.expand(tgt_batch, tgt_len, src_len, dim)
uh = self.linear_context(h_s.contiguous().view(-1, dim))
uh = uh.view(src_batch, 1, src_len, dim)
uh = uh.expand(src_batch, tgt_len, src_len, dim)
wquh = self.tanh(wq + uh)
return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len)
def forward(self, input_0, input_1):
primals_3 = self.linear_out.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0], output[1]
|
rajasagashe/coarse2fine
|
GlobalAttention
| false
| 16,315
|
[
"MIT"
] | 164
|
d6c51a3073df9018e32c95c257c68b0d69d9aa46
|
https://github.com/rajasagashe/coarse2fine/tree/d6c51a3073df9018e32c95c257c68b0d69d9aa46
|
RMSELoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/fg/cfgtp4ynxj6iswly436iktrb6rezfyctxdoohnh4z556bxpa2jv4.py
# Topologically Sorted Source Nodes: [mse_loss, loss], Original ATen: [aten.mse_loss, aten.sqrt]
# Source node to ATen node mapping:
# loss => sqrt
# mse_loss => mean, pow_1, sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%mean,), kwargs = {})
triton_per_fused_mse_loss_sqrt_0 = async_compile.triton('triton_per_fused_mse_loss_sqrt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mse_loss_sqrt_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mse_loss_sqrt_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp9, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mse_loss, loss], Original ATen: [aten.mse_loss, aten.sqrt]
stream0 = get_raw_stream(0)
triton_per_fused_mse_loss_sqrt_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mse_loss_sqrt_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mse_loss_sqrt_0[grid(1)](buf1, arg1_1, arg0_1, 1,
256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class RMSELossNew(torch.nn.Module):
def __init__(self):
super(RMSELossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
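def _example_usage():
    # Hedged usage sketch, not from the original repository: the compiled kernel
    # reduces a fixed pair of (4, 4, 4, 4) tensors to sqrt(mean((a - b) ** 2)).
    a = torch.rand(4, 4, 4, 4, device='cuda')
    b = torch.rand(4, 4, 4, 4, device='cuda')
    loss = RMSELossNew()(a, b)
    return loss, torch.sqrt(torch.nn.functional.mse_loss(a, b))  # values should agree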
|
gitter-badger/HEPAutoencoders
|
RMSELoss
| false
| 12,429
|
[
"Apache-2.0"
] | 0
|
43010cd66fa4335a04b30b87926148e1c8d92de9
|
https://github.com/gitter-badger/HEPAutoencoders/tree/43010cd66fa4335a04b30b87926148e1c8d92de9
|
SqueezeBertLayerNorm
|
import torch
from torch import nn
import torch.utils.checkpoint
class SqueezeBertLayerNorm(nn.LayerNorm):
"""
This is a nn.LayerNorm subclass that accepts NCW data layout and performs normalization in the C dimension.
N = batch C = channels W = sequence length
"""
def __init__(self, hidden_size, eps=1e-12):
nn.LayerNorm.__init__(self, normalized_shape=hidden_size, eps=eps)
def forward(self, x):
x = x.permute(0, 2, 1)
x = nn.LayerNorm.forward(self, x)
return x.permute(0, 2, 1)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-12
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x2, tmp8, xmask)
tl.store(out_ptr1 + x2, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.
constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y3, ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + y3, ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2 + 4 * y3), tmp8, xmask & ymask)
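# Together the two kernels above implement LayerNorm over the channel axis:
# triton_poi_fused_native_layer_norm_0 computes, per (batch, position) pair,
# the mean of the 4 channel values (tmp8) and rsqrt(var + 1e-12) (tmp23);
# triton_poi_fused_native_layer_norm_1 then applies
# (x - mean) * rsqrt * weight + bias and writes the normalized result back in
# the input's (N, C, W) shape.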
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(16)](primals_1, buf0,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(16, 4)](primals_1, buf0,
buf1, primals_2, primals_3, buf2, 16, 4, XBLOCK=4, YBLOCK=16,
num_warps=1, num_stages=1)
del buf0
del buf1
del primals_2
del primals_3
return reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), primals_1
class SqueezeBertLayerNormNew(nn.LayerNorm):
"""
This is a nn.LayerNorm subclass that accepts NCW data layout and performs normalization in the C dimension.
    N = batch, C = channels, W = sequence length
"""
def __init__(self, hidden_size, eps=1e-12):
nn.LayerNorm.__init__(self, normalized_shape=hidden_size, eps=eps)
def forward(self, input_0):
primals_2 = self.weight
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
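# Hedged CUDA sketch (assumes a GPU is available): the Triton-backed module
# should reproduce F.layer_norm applied over the channel dimension via permutes.
def _triton_layer_norm_check():
    x = torch.rand([4, 4, 4], device='cuda')
    m = SqueezeBertLayerNormNew(hidden_size=4).cuda()
    ref = torch.nn.functional.layer_norm(
        x.permute(0, 2, 1), (4,), m.weight, m.bias, eps=1e-12
    ).permute(0, 2, 1)
    assert torch.allclose(m(x), ref, atol=1e-5)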
|
Clemens123/transformers
|
SqueezeBertLayerNorm
| false
| 11,497
|
[
"Apache-2.0"
] | 0
|
22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26
|
https://github.com/Clemens123/transformers/tree/22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26
|
SIREN_CONV
|
import torch
import numpy as np
import torch.nn as nn
def act(act_fun='LeakyReLU'):
"""
Either string defining an activation function or module (e.g. nn.ReLU)
"""
if isinstance(act_fun, str):
if act_fun == 'LeakyReLU':
return nn.LeakyReLU(0.2, inplace=True)
elif act_fun == 'Swish':
return Swish()
elif act_fun[:3] == 'ELU':
if len(act_fun) > 3:
param = float(act_fun[3:])
return nn.ELU(param, inplace=True)
return nn.ELU(inplace=True)
elif act_fun == 'ReLU':
return nn.ReLU()
elif act_fun == 'tanh':
return Tanh()
elif act_fun == 'sine':
return Sin()
elif act_fun == 'soft':
return nn.Softplus()
elif act_fun == 'none':
return nn.Sequential()
else:
assert False
else:
return act_fun()
class Swish(nn.Module):
"""
https://arxiv.org/abs/1710.05941
The hype was so huge that I could not help but try it
"""
def __init__(self):
super(Swish, self).__init__()
self.s = nn.Sigmoid()
def forward(self, x):
return x * self.s(x)
class Tanh(nn.Module):
"""
https://arxiv.org/abs/1710.05941
The hype was so huge that I could not help but try it
"""
def __init__(self):
super(Tanh, self).__init__()
def forward(self, x):
return torch.tanh(x)
class Sin(nn.Module):
"""
https://arxiv.org/abs/1710.05941
The hype was so huge that I could not help but try it
"""
def __init__(self):
super(Sin, self).__init__()
def forward(self, x):
return torch.sin(x)
class SIREN_layer(nn.Module):
def __init__(self, ch_in, ch_out, frist=False, act_fun='sine', omega_0=30):
super(SIREN_layer, self).__init__()
self.conv1 = nn.Conv2d(ch_in, ch_out, kernel_size=1, stride=1, bias
=True)
self.act_fun = act(act_fun)
self.omega_0 = omega_0
self.in_features = ch_in
self.frist = frist
self.init()
def init(self):
with torch.no_grad():
if self.frist:
self.conv1.weight.uniform_(-1 / self.in_features, 1 / self.
in_features)
else:
self.conv1.weight.uniform_(-np.sqrt(6 / self.in_features) /
self.omega_0, np.sqrt(6 / self.in_features) / self.omega_0)
def forward(self, x):
x = self.conv1(x)
return self.act_fun(self.omega_0 * x)
class SIREN_CONV(nn.Module):
def __init__(self, ch_in, ch_out):
super(SIREN_CONV, self).__init__()
self.conv1 = SIREN_layer(ch_in, 64, frist=True)
self.conv2 = SIREN_layer(64, 32)
self.conv3 = SIREN_layer(32, ch_out)
self.conv = nn.Sequential(self.conv1, self.conv2, self.conv3)
def forward(self, x):
x = self.conv(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'ch_in': 4, 'ch_out': 4}]
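# Hedged usage sketch (not from the original repo): the three 1x1 SIREN layers
# map 4 -> 64 -> 32 -> 4 channels, so the spatial dimensions are preserved.
def _siren_demo():
    net = SIREN_CONV(ch_in=4, ch_out=4)
    x = torch.rand([4, 4, 4, 4])
    y = net(x)
    assert y.shape == (4, 4, 4, 4)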
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_mul_sin_0(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 30.0
tmp4 = tmp2 * tmp3
tmp5 = tl_math.sin(tmp4)
tl.store(in_out_ptr0 + x3, tmp2, None)
tl.store(out_ptr0 + x3, tmp5, None)
@triton.jit
def triton_poi_fused_convolution_mul_sin_1(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 30.0
tmp4 = tmp2 * tmp3
tmp5 = tl_math.sin(tmp4)
tl.store(in_out_ptr0 + x3, tmp2, None)
tl.store(out_ptr0 + x3, tmp5, None)
@triton.jit
def triton_poi_fused_convolution_mul_sin_2(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 30.0
tmp4 = tmp2 * tmp3
tmp5 = tl_math.sin(tmp4)
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp5, xmask)
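# Each of the three kernels above fuses the convolution bias add with the SIREN
# activation: the biased pre-activation is written back into in_out_ptr0
# (returned by call() below, presumably for the backward pass) and
# sin(30.0 * pre_activation) is written to out_ptr0, for the 64-, 32- and
# 4-channel stages respectively.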
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (64, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (32, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (4, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 4, 4), (1024, 16, 4, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused_convolution_mul_sin_0[grid(4096)](buf1, primals_2,
buf2, 4096, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 32, 4, 4), (512, 16, 4, 1))
buf4 = buf3
del buf3
buf5 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.float32
)
triton_poi_fused_convolution_mul_sin_1[grid(2048)](buf4, primals_5,
buf5, 2048, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = buf6
del buf6
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_convolution_mul_sin_2[grid(256)](buf7, primals_7,
buf8, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
return (buf8, primals_1, primals_3, primals_4, primals_6, buf1, buf2,
buf4, buf5, buf7)
def act(act_fun='LeakyReLU'):
"""
Either string defining an activation function or module (e.g. nn.ReLU)
"""
if isinstance(act_fun, str):
if act_fun == 'LeakyReLU':
return nn.LeakyReLU(0.2, inplace=True)
elif act_fun == 'Swish':
return Swish()
elif act_fun[:3] == 'ELU':
if len(act_fun) > 3:
param = float(act_fun[3:])
return nn.ELU(param, inplace=True)
return nn.ELU(inplace=True)
elif act_fun == 'ReLU':
return nn.ReLU()
elif act_fun == 'tanh':
return Tanh()
elif act_fun == 'sine':
return Sin()
elif act_fun == 'soft':
return nn.Softplus()
elif act_fun == 'none':
return nn.Sequential()
else:
assert False
else:
return act_fun()
class Swish(nn.Module):
"""
https://arxiv.org/abs/1710.05941
The hype was so huge that I could not help but try it
"""
def __init__(self):
super(Swish, self).__init__()
self.s = nn.Sigmoid()
def forward(self, x):
return x * self.s(x)
class Tanh(nn.Module):
"""
https://arxiv.org/abs/1710.05941
The hype was so huge that I could not help but try it
"""
def __init__(self):
super(Tanh, self).__init__()
def forward(self, x):
return torch.tanh(x)
class Sin(nn.Module):
"""
https://arxiv.org/abs/1710.05941
The hype was so huge that I could not help but try it
"""
def __init__(self):
super(Sin, self).__init__()
def forward(self, x):
return torch.sin(x)
class SIREN_layer(nn.Module):
def __init__(self, ch_in, ch_out, frist=False, act_fun='sine', omega_0=30):
super(SIREN_layer, self).__init__()
self.conv1 = nn.Conv2d(ch_in, ch_out, kernel_size=1, stride=1, bias
=True)
self.act_fun = act(act_fun)
self.omega_0 = omega_0
self.in_features = ch_in
self.frist = frist
self.init()
def init(self):
with torch.no_grad():
if self.frist:
self.conv1.weight.uniform_(-1 / self.in_features, 1 / self.
in_features)
else:
self.conv1.weight.uniform_(-np.sqrt(6 / self.in_features) /
self.omega_0, np.sqrt(6 / self.in_features) / self.omega_0)
def forward(self, x):
x = self.conv1(x)
return self.act_fun(self.omega_0 * x)
class SIREN_CONVNew(nn.Module):
def __init__(self, ch_in, ch_out):
super(SIREN_CONVNew, self).__init__()
self.conv1 = SIREN_layer(ch_in, 64, frist=True)
self.conv2 = SIREN_layer(64, 32)
self.conv3 = SIREN_layer(32, ch_out)
self.conv = nn.Sequential(self.conv1, self.conv2, self.conv3)
def forward(self, input_0):
primals_1 = self.conv1.conv1.weight
primals_2 = self.conv1.conv1.bias
primals_4 = self.conv2.conv1.weight
primals_5 = self.conv2.conv1.bias
primals_6 = self.conv3.conv1.weight
primals_7 = self.conv3.conv1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
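# Hedged CUDA sketch (assumes a GPU): the compiled forward gathers the per-layer
# weights and biases and should keep the 4x4x4x4 shape of get_inputs().
def _siren_triton_demo():
    net = SIREN_CONVNew(ch_in=4, ch_out=4).cuda()
    x = torch.rand([4, 4, 4, 4], device='cuda')
    assert net(x).shape == (4, 4, 4, 4)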
|
dustlrdk/noise2self
|
SIREN_CONV
| false
| 3,454
|
[
"MIT"
] | 0
|
46e8c4650f7ec4f664448417fecd39b4cae477f7
|
https://github.com/dustlrdk/noise2self/tree/46e8c4650f7ec4f664448417fecd39b4cae477f7
|
ResNetBottleneck
|
import torch
from torch import nn
import torch.nn.functional as F
class ResNetBottleneck(nn.Module):
def __init__(self, in_channels, out_channels, bottleneck_channels,
stride, downsample=None):
super(ResNetBottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_channels, bottleneck_channels,
kernel_size=1, bias=False)
self.conv2 = nn.Conv2d(bottleneck_channels, bottleneck_channels,
kernel_size=3, stride=stride, padding=1, bias=False)
self.conv3 = nn.Conv2d(bottleneck_channels, out_channels,
kernel_size=1, bias=False)
self.downsample = downsample
def forward(self, x):
residual = x
out = F.relu(self.conv1(x), inplace=True)
out = F.relu(self.conv2(out), inplace=True)
out = self.conv3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = F.relu(out, inplace=True)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'bottleneck_channels':
4, 'stride': 1}]
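# Hedged usage sketch (not from the original repo): with in_channels ==
# out_channels and stride 1 the identity skip needs no downsample module,
# matching get_init_inputs() above.
def _bottleneck_demo():
    block = ResNetBottleneck(in_channels=4, out_channels=4,
        bottleneck_channels=4, stride=1)
    x = torch.rand([4, 4, 4, 4])
    assert block(x).shape == x.shape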
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr0 + x0, tmp6, xmask)
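# triton_poi_fused_relu_0 applies ReLU in place to the outputs of the first two
# convolutions; triton_poi_fused_add_relu_threshold_backward_1 adds the residual,
# applies the final ReLU in place, and also stores the (output <= 0) mask used
# by the backward pass of the fused ReLU.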
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](buf1, 256, XBLOCK=128, num_warps
=4, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_relu_0[grid(256)](buf3, 256, XBLOCK=128, num_warps
=4, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = buf4
del buf4
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_relu_threshold_backward_1[grid(256)](buf5,
primals_1, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1)
return buf5, primals_1, primals_2, primals_3, primals_4, buf1, buf3, buf6
class ResNetBottleneckNew(nn.Module):
def __init__(self, in_channels, out_channels, bottleneck_channels,
stride, downsample=None):
super(ResNetBottleneckNew, self).__init__()
self.conv1 = nn.Conv2d(in_channels, bottleneck_channels,
kernel_size=1, bias=False)
self.conv2 = nn.Conv2d(bottleneck_channels, bottleneck_channels,
kernel_size=3, stride=stride, padding=1, bias=False)
self.conv3 = nn.Conv2d(bottleneck_channels, out_channels,
kernel_size=1, bias=False)
self.downsample = downsample
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv2.weight
primals_4 = self.conv3.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
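# Hedged CUDA sketch (assumes a GPU): shape check for the Triton-lowered block;
# the traced graph assumes downsample is None, as in get_init_inputs().
def _bottleneck_triton_demo():
    block = ResNetBottleneckNew(in_channels=4, out_channels=4,
        bottleneck_channels=4, stride=1).cuda()
    x = torch.rand([4, 4, 4, 4], device='cuda')
    assert block(x).shape == x.shape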
|
KH-Kyle/rmp_nav
|
ResNetBottleneck
| false
| 8,396
|
[
"MIT"
] | 30
|
d598fe70664a4cdc0e9b9dd4b52e84aa3de1b551
|
https://github.com/KH-Kyle/rmp_nav/tree/d598fe70664a4cdc0e9b9dd4b52e84aa3de1b551
|
Truncation2D
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/i6/ci6z5mzow5osw3dg4aqxcjm6cnkkx2erpzc5642i5hzvuq2fje2j.py
# Topologically Sorted Source Nodes: [iadd_2], Original ATen: [aten.add]
# Source node to ATen node mapping:
# iadd_2 => add_2
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_36, %select_5), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 22, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp264 = tl.load(in_ptr0 + (32 + x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = 8 + x0
tmp4 = tmp3 >= tmp1
tmp5 = tl.full([1], 8, tl.int64)
tmp6 = tmp3 < tmp5
tmp7 = tmp4 & tmp6
tmp8 = tmp7 & tmp2
tmp9 = tmp2 & tmp8
tmp10 = tmp7 & tmp9
tmp11 = tmp2 & tmp10
tmp12 = tmp3 < tmp1
tmp13 = tmp12 & tmp11
tmp14 = tmp2 & tmp13
tmp15 = tmp12 & tmp14
tmp16 = tl.load(in_ptr0 + (8 + x2), tmp15 & xmask, other=0.0)
tmp17 = 0.0
tmp18 = tmp17 + tmp16
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp15, tmp18, tmp19)
tmp21 = tl.where(tmp12, tmp20, tmp17)
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp14, tmp21, tmp22)
tmp24 = tl.where(tmp2, tmp23, tmp17)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp13, tmp24, tmp25)
tmp27 = tmp2 & tmp11
tmp28 = tmp12 & tmp27
tmp29 = tl.load(in_ptr0 + (8 + x2), tmp28 & xmask, other=0.0)
tmp30 = tmp17 + tmp29
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp28, tmp30, tmp31)
tmp33 = tl.where(tmp12, tmp32, tmp17)
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp27, tmp33, tmp34)
tmp36 = tl.where(tmp2, tmp35, tmp17)
tmp37 = tl.where(tmp12, tmp26, tmp36)
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp11, tmp37, tmp38)
tmp40 = tl.load(in_ptr0 + (8 + x2), tmp13 & xmask, other=0.0)
tmp41 = tmp17 + tmp40
tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
tmp43 = tl.where(tmp13, tmp41, tmp42)
tmp44 = tl.where(tmp12, tmp43, tmp17)
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp11, tmp44, tmp45)
tmp47 = tl.where(tmp2, tmp46, tmp17)
tmp48 = tl.where(tmp2, tmp39, tmp47)
tmp49 = tl.load(in_ptr0 + (20 + x2), tmp10 & xmask, other=0.0)
tmp50 = tmp48 + tmp49
tmp51 = tl.full(tmp50.shape, 0.0, tmp50.dtype)
tmp52 = tl.where(tmp10, tmp50, tmp51)
tmp53 = tmp2 & tmp9
tmp54 = tmp12 & tmp53
tmp55 = tmp2 & tmp54
tmp56 = tmp12 & tmp55
tmp57 = tl.load(in_ptr0 + (8 + x2), tmp56 & xmask, other=0.0)
tmp58 = tmp17 + tmp57
tmp59 = tl.full(tmp58.shape, 0.0, tmp58.dtype)
tmp60 = tl.where(tmp56, tmp58, tmp59)
tmp61 = tl.where(tmp12, tmp60, tmp17)
tmp62 = tl.full(tmp61.shape, 0.0, tmp61.dtype)
tmp63 = tl.where(tmp55, tmp61, tmp62)
tmp64 = tl.where(tmp2, tmp63, tmp17)
tmp65 = tl.full(tmp64.shape, 0.0, tmp64.dtype)
tmp66 = tl.where(tmp54, tmp64, tmp65)
tmp67 = tmp2 & tmp53
tmp68 = tmp12 & tmp67
tmp69 = tl.load(in_ptr0 + (8 + x2), tmp68 & xmask, other=0.0)
tmp70 = tmp17 + tmp69
tmp71 = tl.full(tmp70.shape, 0.0, tmp70.dtype)
tmp72 = tl.where(tmp68, tmp70, tmp71)
tmp73 = tl.where(tmp12, tmp72, tmp17)
tmp74 = tl.full(tmp73.shape, 0.0, tmp73.dtype)
tmp75 = tl.where(tmp67, tmp73, tmp74)
tmp76 = tl.where(tmp2, tmp75, tmp17)
tmp77 = tl.where(tmp12, tmp66, tmp76)
tmp78 = tl.full(tmp77.shape, 0.0, tmp77.dtype)
tmp79 = tl.where(tmp53, tmp77, tmp78)
tmp80 = tl.load(in_ptr0 + (8 + x2), tmp54 & xmask, other=0.0)
tmp81 = tmp17 + tmp80
tmp82 = tl.full(tmp81.shape, 0.0, tmp81.dtype)
tmp83 = tl.where(tmp54, tmp81, tmp82)
tmp84 = tl.where(tmp12, tmp83, tmp17)
tmp85 = tl.full(tmp84.shape, 0.0, tmp84.dtype)
tmp86 = tl.where(tmp53, tmp84, tmp85)
tmp87 = tl.where(tmp2, tmp86, tmp17)
tmp88 = tl.where(tmp2, tmp79, tmp87)
tmp89 = tl.where(tmp7, tmp52, tmp88)
tmp90 = tl.full(tmp89.shape, 0.0, tmp89.dtype)
tmp91 = tl.where(tmp9, tmp89, tmp90)
tmp92 = tmp12 & tmp9
tmp93 = tmp2 & tmp92
tmp94 = tmp12 & tmp93
tmp95 = tl.load(in_ptr0 + (8 + x2), tmp94 & xmask, other=0.0)
tmp96 = tmp17 + tmp95
tmp97 = tl.full(tmp96.shape, 0.0, tmp96.dtype)
tmp98 = tl.where(tmp94, tmp96, tmp97)
tmp99 = tl.where(tmp12, tmp98, tmp17)
tmp100 = tl.full(tmp99.shape, 0.0, tmp99.dtype)
tmp101 = tl.where(tmp93, tmp99, tmp100)
tmp102 = tl.where(tmp2, tmp101, tmp17)
tmp103 = tl.full(tmp102.shape, 0.0, tmp102.dtype)
tmp104 = tl.where(tmp92, tmp102, tmp103)
tmp105 = tl.where(tmp12, tmp104, tmp87)
tmp106 = tl.full(tmp105.shape, 0.0, tmp105.dtype)
tmp107 = tl.where(tmp9, tmp105, tmp106)
tmp108 = tl.load(in_ptr0 + (8 + x2), tmp92 & xmask, other=0.0)
tmp109 = tmp17 + tmp108
tmp110 = tl.full(tmp109.shape, 0.0, tmp109.dtype)
tmp111 = tl.where(tmp92, tmp109, tmp110)
tmp112 = tl.where(tmp12, tmp111, tmp17)
tmp113 = tl.full(tmp112.shape, 0.0, tmp112.dtype)
tmp114 = tl.where(tmp9, tmp112, tmp113)
tmp115 = tl.where(tmp2, tmp114, tmp17)
tmp116 = tl.where(tmp2, tmp107, tmp115)
tmp117 = tl.where(tmp2, tmp91, tmp116)
tmp118 = tl.full(tmp117.shape, 0.0, tmp117.dtype)
tmp119 = tl.where(tmp8, tmp117, tmp118)
tmp120 = tmp2 & tmp2
tmp121 = tmp7 & tmp120
tmp122 = tmp2 & tmp121
tmp123 = tmp12 & tmp122
tmp124 = tmp2 & tmp123
tmp125 = tmp12 & tmp124
tmp126 = tl.load(in_ptr0 + (8 + x2), tmp125 & xmask, other=0.0)
tmp127 = tmp17 + tmp126
tmp128 = tl.full(tmp127.shape, 0.0, tmp127.dtype)
tmp129 = tl.where(tmp125, tmp127, tmp128)
tmp130 = tl.where(tmp12, tmp129, tmp17)
tmp131 = tl.full(tmp130.shape, 0.0, tmp130.dtype)
tmp132 = tl.where(tmp124, tmp130, tmp131)
tmp133 = tl.where(tmp2, tmp132, tmp17)
tmp134 = tl.full(tmp133.shape, 0.0, tmp133.dtype)
tmp135 = tl.where(tmp123, tmp133, tmp134)
tmp136 = tmp2 & tmp122
tmp137 = tmp12 & tmp136
tmp138 = tl.load(in_ptr0 + (8 + x2), tmp137 & xmask, other=0.0)
tmp139 = tmp17 + tmp138
tmp140 = tl.full(tmp139.shape, 0.0, tmp139.dtype)
tmp141 = tl.where(tmp137, tmp139, tmp140)
tmp142 = tl.where(tmp12, tmp141, tmp17)
tmp143 = tl.full(tmp142.shape, 0.0, tmp142.dtype)
tmp144 = tl.where(tmp136, tmp142, tmp143)
tmp145 = tl.where(tmp2, tmp144, tmp17)
tmp146 = tl.where(tmp12, tmp135, tmp145)
tmp147 = tl.full(tmp146.shape, 0.0, tmp146.dtype)
tmp148 = tl.where(tmp122, tmp146, tmp147)
tmp149 = tl.load(in_ptr0 + (8 + x2), tmp123 & xmask, other=0.0)
tmp150 = tmp17 + tmp149
tmp151 = tl.full(tmp150.shape, 0.0, tmp150.dtype)
tmp152 = tl.where(tmp123, tmp150, tmp151)
tmp153 = tl.where(tmp12, tmp152, tmp17)
tmp154 = tl.full(tmp153.shape, 0.0, tmp153.dtype)
tmp155 = tl.where(tmp122, tmp153, tmp154)
tmp156 = tl.where(tmp2, tmp155, tmp17)
tmp157 = tl.where(tmp2, tmp148, tmp156)
tmp158 = tl.load(in_ptr0 + (20 + x2), tmp121 & xmask, other=0.0)
tmp159 = tmp157 + tmp158
tmp160 = tl.full(tmp159.shape, 0.0, tmp159.dtype)
tmp161 = tl.where(tmp121, tmp159, tmp160)
tmp162 = tmp2 & tmp120
tmp163 = tmp12 & tmp162
tmp164 = tmp2 & tmp163
tmp165 = tmp12 & tmp164
tmp166 = tl.load(in_ptr0 + (8 + x2), tmp165 & xmask, other=0.0)
tmp167 = tmp17 + tmp166
tmp168 = tl.full(tmp167.shape, 0.0, tmp167.dtype)
tmp169 = tl.where(tmp165, tmp167, tmp168)
tmp170 = tl.where(tmp12, tmp169, tmp17)
tmp171 = tl.full(tmp170.shape, 0.0, tmp170.dtype)
tmp172 = tl.where(tmp164, tmp170, tmp171)
tmp173 = tl.where(tmp2, tmp172, tmp17)
tmp174 = tl.full(tmp173.shape, 0.0, tmp173.dtype)
tmp175 = tl.where(tmp163, tmp173, tmp174)
tmp176 = tmp2 & tmp162
tmp177 = tmp12 & tmp176
tmp178 = tl.load(in_ptr0 + (8 + x2), tmp177 & xmask, other=0.0)
tmp179 = tmp17 + tmp178
tmp180 = tl.full(tmp179.shape, 0.0, tmp179.dtype)
tmp181 = tl.where(tmp177, tmp179, tmp180)
tmp182 = tl.where(tmp12, tmp181, tmp17)
tmp183 = tl.full(tmp182.shape, 0.0, tmp182.dtype)
tmp184 = tl.where(tmp176, tmp182, tmp183)
tmp185 = tl.where(tmp2, tmp184, tmp17)
tmp186 = tl.where(tmp12, tmp175, tmp185)
tmp187 = tl.full(tmp186.shape, 0.0, tmp186.dtype)
tmp188 = tl.where(tmp162, tmp186, tmp187)
tmp189 = tl.load(in_ptr0 + (8 + x2), tmp163 & xmask, other=0.0)
tmp190 = tmp17 + tmp189
tmp191 = tl.full(tmp190.shape, 0.0, tmp190.dtype)
tmp192 = tl.where(tmp163, tmp190, tmp191)
tmp193 = tl.where(tmp12, tmp192, tmp17)
tmp194 = tl.full(tmp193.shape, 0.0, tmp193.dtype)
tmp195 = tl.where(tmp162, tmp193, tmp194)
tmp196 = tl.where(tmp2, tmp195, tmp17)
tmp197 = tl.where(tmp2, tmp188, tmp196)
tmp198 = tl.where(tmp7, tmp161, tmp197)
tmp199 = tl.full(tmp198.shape, 0.0, tmp198.dtype)
tmp200 = tl.where(tmp120, tmp198, tmp199)
tmp201 = tmp12 & tmp120
tmp202 = tmp2 & tmp201
tmp203 = tmp12 & tmp202
tmp204 = tl.load(in_ptr0 + (8 + x2), tmp203 & xmask, other=0.0)
tmp205 = tmp17 + tmp204
tmp206 = tl.full(tmp205.shape, 0.0, tmp205.dtype)
tmp207 = tl.where(tmp203, tmp205, tmp206)
tmp208 = tl.where(tmp12, tmp207, tmp17)
tmp209 = tl.full(tmp208.shape, 0.0, tmp208.dtype)
tmp210 = tl.where(tmp202, tmp208, tmp209)
tmp211 = tl.where(tmp2, tmp210, tmp17)
tmp212 = tl.full(tmp211.shape, 0.0, tmp211.dtype)
tmp213 = tl.where(tmp201, tmp211, tmp212)
tmp214 = tl.where(tmp12, tmp213, tmp196)
tmp215 = tl.full(tmp214.shape, 0.0, tmp214.dtype)
tmp216 = tl.where(tmp120, tmp214, tmp215)
tmp217 = tl.load(in_ptr0 + (8 + x2), tmp201 & xmask, other=0.0)
tmp218 = tmp17 + tmp217
tmp219 = tl.full(tmp218.shape, 0.0, tmp218.dtype)
tmp220 = tl.where(tmp201, tmp218, tmp219)
tmp221 = tl.where(tmp12, tmp220, tmp17)
tmp222 = tl.full(tmp221.shape, 0.0, tmp221.dtype)
tmp223 = tl.where(tmp120, tmp221, tmp222)
tmp224 = tl.where(tmp2, tmp223, tmp17)
tmp225 = tl.where(tmp2, tmp216, tmp224)
tmp226 = tl.where(tmp2, tmp200, tmp225)
tmp227 = tl.where(tmp7, tmp119, tmp226)
tmp228 = tl.full(tmp227.shape, 0.0, tmp227.dtype)
tmp229 = tl.where(tmp2, tmp227, tmp228)
tmp230 = tl.load(in_ptr0 + (20 + x2), tmp8 & xmask, other=0.0)
tmp231 = tmp116 + tmp230
tmp232 = tl.full(tmp231.shape, 0.0, tmp231.dtype)
tmp233 = tl.where(tmp8, tmp231, tmp232)
tmp234 = tl.where(tmp7, tmp233, tmp225)
tmp235 = tl.full(tmp234.shape, 0.0, tmp234.dtype)
tmp236 = tl.where(tmp2, tmp234, tmp235)
tmp237 = tmp12 & tmp2
tmp238 = tmp2 & tmp237
tmp239 = tmp12 & tmp238
tmp240 = tl.load(in_ptr0 + (8 + x2), tmp239 & xmask, other=0.0)
tmp241 = tmp17 + tmp240
tmp242 = tl.full(tmp241.shape, 0.0, tmp241.dtype)
tmp243 = tl.where(tmp239, tmp241, tmp242)
tmp244 = tl.where(tmp12, tmp243, tmp17)
tmp245 = tl.full(tmp244.shape, 0.0, tmp244.dtype)
tmp246 = tl.where(tmp238, tmp244, tmp245)
tmp247 = tl.where(tmp2, tmp246, tmp17)
tmp248 = tl.full(tmp247.shape, 0.0, tmp247.dtype)
tmp249 = tl.where(tmp237, tmp247, tmp248)
tmp250 = tl.where(tmp12, tmp249, tmp224)
tmp251 = tl.full(tmp250.shape, 0.0, tmp250.dtype)
tmp252 = tl.where(tmp2, tmp250, tmp251)
tmp253 = tl.load(in_ptr0 + (8 + x2), tmp237 & xmask, other=0.0)
tmp254 = tmp17 + tmp253
tmp255 = tl.full(tmp254.shape, 0.0, tmp254.dtype)
tmp256 = tl.where(tmp237, tmp254, tmp255)
tmp257 = tl.where(tmp12, tmp256, tmp17)
tmp258 = tl.full(tmp257.shape, 0.0, tmp257.dtype)
tmp259 = tl.where(tmp2, tmp257, tmp258)
tmp260 = tl.where(tmp2, tmp259, tmp17)
tmp261 = tl.where(tmp2, tmp252, tmp260)
tmp262 = tl.where(tmp2, tmp236, tmp261)
tmp263 = tl.where(tmp2, tmp229, tmp262)
tmp265 = tmp263 + tmp264
tl.store(out_ptr0 + (x2), tmp265, xmask)
''', device_str='cuda')
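# The deeply nested tl.where chains in the kernel above come from inductor
# inlining the earlier slice_scatter/select results listed in the Graph
# fragment: each element recomputes the running output value (tmp263) and then
# adds the value loaded from in_ptr0 at offset 32 + x2 (tmp264) before storing
# the iadd_2 result.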
# kernel path: runs/run_shard_0/inductor_cache/7n/c7ndkv5xcqcdpgnxbwsw3uvfe5enml37jmdeboltxlcmhvnwppbe.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %slice_scatter_default_8 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_4, %add_2, 1, 8, 12), kwargs = {})
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 22, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 8, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 12, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + ((-8) + x0 + (4*x1)), tmp5 & xmask, other=0.0)
tmp7 = x1
tmp8 = tl.full([1], 4, tl.int64)
tmp9 = tmp7 < tmp8
tmp10 = tmp0 >= tmp8
tmp11 = tmp0 < tmp1
tmp12 = tmp10 & tmp11
tmp13 = tmp12 & tmp9
tmp14 = tmp9 & tmp13
tmp15 = tmp12 & tmp14
tmp16 = tmp9 & tmp15
tmp17 = tmp0 < tmp8
tmp18 = tmp17 & tmp16
tmp19 = tmp9 & tmp18
tmp20 = tmp17 & tmp19
tmp21 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp20 & xmask, other=0.0)
tmp22 = 0.0
tmp23 = tmp22 + tmp21
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp20, tmp23, tmp24)
tmp26 = tl.where(tmp17, tmp25, tmp22)
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp19, tmp26, tmp27)
tmp29 = tl.where(tmp9, tmp28, tmp22)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp18, tmp29, tmp30)
tmp32 = tmp9 & tmp16
tmp33 = tmp17 & tmp32
tmp34 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp33 & xmask, other=0.0)
tmp35 = tmp22 + tmp34
tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype)
tmp37 = tl.where(tmp33, tmp35, tmp36)
tmp38 = tl.where(tmp17, tmp37, tmp22)
tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
tmp40 = tl.where(tmp32, tmp38, tmp39)
tmp41 = tl.where(tmp9, tmp40, tmp22)
tmp42 = tl.where(tmp17, tmp31, tmp41)
tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype)
tmp44 = tl.where(tmp16, tmp42, tmp43)
tmp45 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp18 & xmask, other=0.0)
tmp46 = tmp22 + tmp45
tmp47 = tl.full(tmp46.shape, 0.0, tmp46.dtype)
tmp48 = tl.where(tmp18, tmp46, tmp47)
tmp49 = tl.where(tmp17, tmp48, tmp22)
tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
tmp51 = tl.where(tmp16, tmp49, tmp50)
tmp52 = tl.where(tmp9, tmp51, tmp22)
tmp53 = tl.where(tmp9, tmp44, tmp52)
tmp54 = tl.load(in_ptr1 + (12 + x0 + (4*x1)), tmp15 & xmask, other=0.0)
tmp55 = tmp53 + tmp54
tmp56 = tl.full(tmp55.shape, 0.0, tmp55.dtype)
tmp57 = tl.where(tmp15, tmp55, tmp56)
tmp58 = tmp9 & tmp14
tmp59 = tmp17 & tmp58
tmp60 = tmp9 & tmp59
tmp61 = tmp17 & tmp60
tmp62 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp61 & xmask, other=0.0)
tmp63 = tmp22 + tmp62
tmp64 = tl.full(tmp63.shape, 0.0, tmp63.dtype)
tmp65 = tl.where(tmp61, tmp63, tmp64)
tmp66 = tl.where(tmp17, tmp65, tmp22)
tmp67 = tl.full(tmp66.shape, 0.0, tmp66.dtype)
tmp68 = tl.where(tmp60, tmp66, tmp67)
tmp69 = tl.where(tmp9, tmp68, tmp22)
tmp70 = tl.full(tmp69.shape, 0.0, tmp69.dtype)
tmp71 = tl.where(tmp59, tmp69, tmp70)
tmp72 = tmp9 & tmp58
tmp73 = tmp17 & tmp72
tmp74 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp73 & xmask, other=0.0)
tmp75 = tmp22 + tmp74
tmp76 = tl.full(tmp75.shape, 0.0, tmp75.dtype)
tmp77 = tl.where(tmp73, tmp75, tmp76)
tmp78 = tl.where(tmp17, tmp77, tmp22)
tmp79 = tl.full(tmp78.shape, 0.0, tmp78.dtype)
tmp80 = tl.where(tmp72, tmp78, tmp79)
tmp81 = tl.where(tmp9, tmp80, tmp22)
tmp82 = tl.where(tmp17, tmp71, tmp81)
tmp83 = tl.full(tmp82.shape, 0.0, tmp82.dtype)
tmp84 = tl.where(tmp58, tmp82, tmp83)
tmp85 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp59 & xmask, other=0.0)
tmp86 = tmp22 + tmp85
tmp87 = tl.full(tmp86.shape, 0.0, tmp86.dtype)
tmp88 = tl.where(tmp59, tmp86, tmp87)
tmp89 = tl.where(tmp17, tmp88, tmp22)
tmp90 = tl.full(tmp89.shape, 0.0, tmp89.dtype)
tmp91 = tl.where(tmp58, tmp89, tmp90)
tmp92 = tl.where(tmp9, tmp91, tmp22)
tmp93 = tl.where(tmp9, tmp84, tmp92)
tmp94 = tl.where(tmp12, tmp57, tmp93)
tmp95 = tl.full(tmp94.shape, 0.0, tmp94.dtype)
tmp96 = tl.where(tmp14, tmp94, tmp95)
tmp97 = tmp17 & tmp14
tmp98 = tmp9 & tmp97
tmp99 = tmp17 & tmp98
tmp100 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp99 & xmask, other=0.0)
tmp101 = tmp22 + tmp100
tmp102 = tl.full(tmp101.shape, 0.0, tmp101.dtype)
tmp103 = tl.where(tmp99, tmp101, tmp102)
tmp104 = tl.where(tmp17, tmp103, tmp22)
tmp105 = tl.full(tmp104.shape, 0.0, tmp104.dtype)
tmp106 = tl.where(tmp98, tmp104, tmp105)
tmp107 = tl.where(tmp9, tmp106, tmp22)
tmp108 = tl.full(tmp107.shape, 0.0, tmp107.dtype)
tmp109 = tl.where(tmp97, tmp107, tmp108)
tmp110 = tl.where(tmp17, tmp109, tmp92)
tmp111 = tl.full(tmp110.shape, 0.0, tmp110.dtype)
tmp112 = tl.where(tmp14, tmp110, tmp111)
tmp113 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp97 & xmask, other=0.0)
tmp114 = tmp22 + tmp113
tmp115 = tl.full(tmp114.shape, 0.0, tmp114.dtype)
tmp116 = tl.where(tmp97, tmp114, tmp115)
tmp117 = tl.where(tmp17, tmp116, tmp22)
tmp118 = tl.full(tmp117.shape, 0.0, tmp117.dtype)
tmp119 = tl.where(tmp14, tmp117, tmp118)
tmp120 = tl.where(tmp9, tmp119, tmp22)
tmp121 = tl.where(tmp9, tmp112, tmp120)
tmp122 = tl.where(tmp9, tmp96, tmp121)
tmp123 = tl.full(tmp122.shape, 0.0, tmp122.dtype)
tmp124 = tl.where(tmp13, tmp122, tmp123)
tmp125 = tmp9 & tmp9
tmp126 = tmp12 & tmp125
tmp127 = tmp9 & tmp126
tmp128 = tmp17 & tmp127
tmp129 = tmp9 & tmp128
tmp130 = tmp17 & tmp129
tmp131 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp130 & xmask, other=0.0)
tmp132 = tmp22 + tmp131
tmp133 = tl.full(tmp132.shape, 0.0, tmp132.dtype)
tmp134 = tl.where(tmp130, tmp132, tmp133)
tmp135 = tl.where(tmp17, tmp134, tmp22)
tmp136 = tl.full(tmp135.shape, 0.0, tmp135.dtype)
tmp137 = tl.where(tmp129, tmp135, tmp136)
tmp138 = tl.where(tmp9, tmp137, tmp22)
tmp139 = tl.full(tmp138.shape, 0.0, tmp138.dtype)
tmp140 = tl.where(tmp128, tmp138, tmp139)
tmp141 = tmp9 & tmp127
tmp142 = tmp17 & tmp141
tmp143 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp142 & xmask, other=0.0)
tmp144 = tmp22 + tmp143
tmp145 = tl.full(tmp144.shape, 0.0, tmp144.dtype)
tmp146 = tl.where(tmp142, tmp144, tmp145)
tmp147 = tl.where(tmp17, tmp146, tmp22)
tmp148 = tl.full(tmp147.shape, 0.0, tmp147.dtype)
tmp149 = tl.where(tmp141, tmp147, tmp148)
tmp150 = tl.where(tmp9, tmp149, tmp22)
tmp151 = tl.where(tmp17, tmp140, tmp150)
tmp152 = tl.full(tmp151.shape, 0.0, tmp151.dtype)
tmp153 = tl.where(tmp127, tmp151, tmp152)
tmp154 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp128 & xmask, other=0.0)
tmp155 = tmp22 + tmp154
tmp156 = tl.full(tmp155.shape, 0.0, tmp155.dtype)
tmp157 = tl.where(tmp128, tmp155, tmp156)
tmp158 = tl.where(tmp17, tmp157, tmp22)
tmp159 = tl.full(tmp158.shape, 0.0, tmp158.dtype)
tmp160 = tl.where(tmp127, tmp158, tmp159)
tmp161 = tl.where(tmp9, tmp160, tmp22)
tmp162 = tl.where(tmp9, tmp153, tmp161)
tmp163 = tl.load(in_ptr1 + (12 + x0 + (4*x1)), tmp126 & xmask, other=0.0)
tmp164 = tmp162 + tmp163
tmp165 = tl.full(tmp164.shape, 0.0, tmp164.dtype)
tmp166 = tl.where(tmp126, tmp164, tmp165)
tmp167 = tmp9 & tmp125
tmp168 = tmp17 & tmp167
tmp169 = tmp9 & tmp168
tmp170 = tmp17 & tmp169
tmp171 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp170 & xmask, other=0.0)
tmp172 = tmp22 + tmp171
tmp173 = tl.full(tmp172.shape, 0.0, tmp172.dtype)
tmp174 = tl.where(tmp170, tmp172, tmp173)
tmp175 = tl.where(tmp17, tmp174, tmp22)
tmp176 = tl.full(tmp175.shape, 0.0, tmp175.dtype)
tmp177 = tl.where(tmp169, tmp175, tmp176)
tmp178 = tl.where(tmp9, tmp177, tmp22)
tmp179 = tl.full(tmp178.shape, 0.0, tmp178.dtype)
tmp180 = tl.where(tmp168, tmp178, tmp179)
tmp181 = tmp9 & tmp167
tmp182 = tmp17 & tmp181
tmp183 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp182 & xmask, other=0.0)
tmp184 = tmp22 + tmp183
tmp185 = tl.full(tmp184.shape, 0.0, tmp184.dtype)
tmp186 = tl.where(tmp182, tmp184, tmp185)
tmp187 = tl.where(tmp17, tmp186, tmp22)
tmp188 = tl.full(tmp187.shape, 0.0, tmp187.dtype)
tmp189 = tl.where(tmp181, tmp187, tmp188)
tmp190 = tl.where(tmp9, tmp189, tmp22)
tmp191 = tl.where(tmp17, tmp180, tmp190)
tmp192 = tl.full(tmp191.shape, 0.0, tmp191.dtype)
tmp193 = tl.where(tmp167, tmp191, tmp192)
tmp194 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp168 & xmask, other=0.0)
tmp195 = tmp22 + tmp194
tmp196 = tl.full(tmp195.shape, 0.0, tmp195.dtype)
tmp197 = tl.where(tmp168, tmp195, tmp196)
tmp198 = tl.where(tmp17, tmp197, tmp22)
tmp199 = tl.full(tmp198.shape, 0.0, tmp198.dtype)
tmp200 = tl.where(tmp167, tmp198, tmp199)
tmp201 = tl.where(tmp9, tmp200, tmp22)
tmp202 = tl.where(tmp9, tmp193, tmp201)
tmp203 = tl.where(tmp12, tmp166, tmp202)
tmp204 = tl.full(tmp203.shape, 0.0, tmp203.dtype)
tmp205 = tl.where(tmp125, tmp203, tmp204)
tmp206 = tmp17 & tmp125
tmp207 = tmp9 & tmp206
tmp208 = tmp17 & tmp207
tmp209 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp208 & xmask, other=0.0)
tmp210 = tmp22 + tmp209
tmp211 = tl.full(tmp210.shape, 0.0, tmp210.dtype)
tmp212 = tl.where(tmp208, tmp210, tmp211)
tmp213 = tl.where(tmp17, tmp212, tmp22)
tmp214 = tl.full(tmp213.shape, 0.0, tmp213.dtype)
tmp215 = tl.where(tmp207, tmp213, tmp214)
tmp216 = tl.where(tmp9, tmp215, tmp22)
tmp217 = tl.full(tmp216.shape, 0.0, tmp216.dtype)
tmp218 = tl.where(tmp206, tmp216, tmp217)
tmp219 = tl.where(tmp17, tmp218, tmp201)
tmp220 = tl.full(tmp219.shape, 0.0, tmp219.dtype)
tmp221 = tl.where(tmp125, tmp219, tmp220)
tmp222 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp206 & xmask, other=0.0)
tmp223 = tmp22 + tmp222
tmp224 = tl.full(tmp223.shape, 0.0, tmp223.dtype)
tmp225 = tl.where(tmp206, tmp223, tmp224)
tmp226 = tl.where(tmp17, tmp225, tmp22)
tmp227 = tl.full(tmp226.shape, 0.0, tmp226.dtype)
tmp228 = tl.where(tmp125, tmp226, tmp227)
tmp229 = tl.where(tmp9, tmp228, tmp22)
tmp230 = tl.where(tmp9, tmp221, tmp229)
tmp231 = tl.where(tmp9, tmp205, tmp230)
tmp232 = tl.where(tmp12, tmp124, tmp231)
tmp233 = tl.full(tmp232.shape, 0.0, tmp232.dtype)
tmp234 = tl.where(tmp9, tmp232, tmp233)
tmp235 = tl.load(in_ptr1 + (12 + x0 + (4*x1)), tmp13 & xmask, other=0.0)
tmp236 = tmp121 + tmp235
tmp237 = tl.full(tmp236.shape, 0.0, tmp236.dtype)
tmp238 = tl.where(tmp13, tmp236, tmp237)
tmp239 = tl.where(tmp12, tmp238, tmp230)
tmp240 = tl.full(tmp239.shape, 0.0, tmp239.dtype)
tmp241 = tl.where(tmp9, tmp239, tmp240)
tmp242 = tmp17 & tmp9
tmp243 = tmp9 & tmp242
tmp244 = tmp17 & tmp243
tmp245 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp244 & xmask, other=0.0)
tmp246 = tmp22 + tmp245
tmp247 = tl.full(tmp246.shape, 0.0, tmp246.dtype)
tmp248 = tl.where(tmp244, tmp246, tmp247)
tmp249 = tl.where(tmp17, tmp248, tmp22)
tmp250 = tl.full(tmp249.shape, 0.0, tmp249.dtype)
tmp251 = tl.where(tmp243, tmp249, tmp250)
tmp252 = tl.where(tmp9, tmp251, tmp22)
tmp253 = tl.full(tmp252.shape, 0.0, tmp252.dtype)
tmp254 = tl.where(tmp242, tmp252, tmp253)
tmp255 = tl.where(tmp17, tmp254, tmp229)
tmp256 = tl.full(tmp255.shape, 0.0, tmp255.dtype)
tmp257 = tl.where(tmp9, tmp255, tmp256)
tmp258 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp242 & xmask, other=0.0)
tmp259 = tmp22 + tmp258
tmp260 = tl.full(tmp259.shape, 0.0, tmp259.dtype)
tmp261 = tl.where(tmp242, tmp259, tmp260)
tmp262 = tl.where(tmp17, tmp261, tmp22)
tmp263 = tl.full(tmp262.shape, 0.0, tmp262.dtype)
tmp264 = tl.where(tmp9, tmp262, tmp263)
tmp265 = tl.where(tmp9, tmp264, tmp22)
tmp266 = tl.where(tmp9, tmp257, tmp265)
tmp267 = tl.where(tmp9, tmp241, tmp266)
tmp268 = tl.where(tmp9, tmp234, tmp267)
tmp269 = tl.where(tmp5, tmp6, tmp268)
tl.store(out_ptr0 + (x2), tmp269, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/rs/crsismazcidlj2wh2jvwydvqnpv6f4wjc5xse4fhqmjr6y73kkqp.py
# Topologically Sorted Source Nodes: [output, iadd, iadd_1, iadd_3, iadd_4], Original ATen: [aten.zeros, aten.add]
# Source node to ATen node mapping:
# iadd => add
# iadd_1 => add_1
# iadd_3 => add_3
# iadd_4 => add_4
# output => full
# Graph fragment:
# %full : [num_users=4] = call_function[target=torch.ops.aten.full.default](args = ([16, 16], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_2, %select_1), kwargs = {})
# %slice_scatter_default : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor, %add, 1, 0, 4), kwargs = {})
# %slice_scatter_default_1 : [num_users=5] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%full, %slice_scatter_default, 0, 0, 4), kwargs = {})
# %slice_scatter_default_2 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_1, %slice_7, 1, 0, 4), kwargs = {})
# %slice_scatter_default_3 : [num_users=4] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_1, %slice_scatter_default_2, 0, 0, 4), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_20, %select_3), kwargs = {})
# %slice_scatter_default_4 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_2, %add_1, 1, 4, 8), kwargs = {})
# %slice_scatter_default_5 : [num_users=5] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_3, %slice_scatter_default_4, 0, 0, 4), kwargs = {})
# %slice_scatter_default_6 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_3, %slice_23, 1, 4, 8), kwargs = {})
# %slice_scatter_default_7 : [num_users=4] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_5, %slice_scatter_default_6, 0, 0, 4), kwargs = {})
# %slice_scatter_default_9 : [num_users=5] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_7, %slice_scatter_default_8, 0, 0, 4), kwargs = {})
# %slice_scatter_default_10 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_5, %slice_39, 1, 8, 12), kwargs = {})
# %slice_scatter_default_11 : [num_users=4] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_9, %slice_scatter_default_10, 0, 0, 4), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_52, %select_7), kwargs = {})
# %slice_scatter_default_12 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_6, %add_3, 1, 12, 16), kwargs = {})
# %slice_scatter_default_13 : [num_users=5] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_11, %slice_scatter_default_12, 0, 0, 4), kwargs = {})
# %slice_scatter_default_14 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_7, %slice_55, 1, 12, 16), kwargs = {})
# %slice_scatter_default_15 : [num_users=4] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_13, %slice_scatter_default_14, 0, 0, 4), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_68, %select_9), kwargs = {})
# %slice_scatter_default_16 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_8, %add_4, 1, 0, 4), kwargs = {})
# %slice_scatter_default_17 : [num_users=5] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_15, %slice_scatter_default_16, 0, 4, 8), kwargs = {})
triton_poi_fused_add_zeros_2 = async_compile.triton('triton_poi_fused_add_zeros_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_zeros_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 32, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_zeros_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16)
x2 = xindex
x0 = xindex % 16
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (x2), tmp2 & xmask, other=0.0)
tmp4 = x0
tmp5 = tmp4 >= tmp1
tmp6 = tl.full([1], 8, tl.int64)
tmp7 = tmp4 < tmp6
tmp8 = tmp5 & tmp7
tmp9 = tmp8 & tmp2
tmp10 = tmp2 & tmp9
tmp11 = tmp8 & tmp10
tmp12 = tmp2 & tmp11
tmp13 = tmp4 < tmp1
tmp14 = tmp13 & tmp12
tmp15 = tmp2 & tmp14
tmp16 = tmp13 & tmp15
tmp17 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp16 & xmask, other=0.0)
tmp18 = 0.0
tmp19 = tmp18 + tmp17
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp16, tmp19, tmp20)
tmp22 = tl.where(tmp13, tmp21, tmp18)
tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype)
tmp24 = tl.where(tmp15, tmp22, tmp23)
tmp25 = tl.where(tmp2, tmp24, tmp18)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp14, tmp25, tmp26)
tmp28 = tmp2 & tmp12
tmp29 = tmp13 & tmp28
tmp30 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp29 & xmask, other=0.0)
tmp31 = tmp18 + tmp30
tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype)
tmp33 = tl.where(tmp29, tmp31, tmp32)
tmp34 = tl.where(tmp13, tmp33, tmp18)
tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype)
tmp36 = tl.where(tmp28, tmp34, tmp35)
tmp37 = tl.where(tmp2, tmp36, tmp18)
tmp38 = tl.where(tmp13, tmp27, tmp37)
tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
tmp40 = tl.where(tmp12, tmp38, tmp39)
tmp41 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp14 & xmask, other=0.0)
tmp42 = tmp18 + tmp41
tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype)
tmp44 = tl.where(tmp14, tmp42, tmp43)
tmp45 = tl.where(tmp13, tmp44, tmp18)
tmp46 = tl.full(tmp45.shape, 0.0, tmp45.dtype)
tmp47 = tl.where(tmp12, tmp45, tmp46)
tmp48 = tl.where(tmp2, tmp47, tmp18)
tmp49 = tl.where(tmp2, tmp40, tmp48)
tmp50 = tl.load(in_ptr1 + (12 + x0 + (4*x1)), tmp11 & xmask, other=0.0)
tmp51 = tmp49 + tmp50
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp11, tmp51, tmp52)
tmp54 = tmp2 & tmp10
tmp55 = tmp13 & tmp54
tmp56 = tmp2 & tmp55
tmp57 = tmp13 & tmp56
tmp58 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp57 & xmask, other=0.0)
tmp59 = tmp18 + tmp58
tmp60 = tl.full(tmp59.shape, 0.0, tmp59.dtype)
tmp61 = tl.where(tmp57, tmp59, tmp60)
tmp62 = tl.where(tmp13, tmp61, tmp18)
tmp63 = tl.full(tmp62.shape, 0.0, tmp62.dtype)
tmp64 = tl.where(tmp56, tmp62, tmp63)
tmp65 = tl.where(tmp2, tmp64, tmp18)
tmp66 = tl.full(tmp65.shape, 0.0, tmp65.dtype)
tmp67 = tl.where(tmp55, tmp65, tmp66)
tmp68 = tmp2 & tmp54
tmp69 = tmp13 & tmp68
tmp70 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp69 & xmask, other=0.0)
tmp71 = tmp18 + tmp70
tmp72 = tl.full(tmp71.shape, 0.0, tmp71.dtype)
tmp73 = tl.where(tmp69, tmp71, tmp72)
tmp74 = tl.where(tmp13, tmp73, tmp18)
tmp75 = tl.full(tmp74.shape, 0.0, tmp74.dtype)
tmp76 = tl.where(tmp68, tmp74, tmp75)
tmp77 = tl.where(tmp2, tmp76, tmp18)
tmp78 = tl.where(tmp13, tmp67, tmp77)
tmp79 = tl.full(tmp78.shape, 0.0, tmp78.dtype)
tmp80 = tl.where(tmp54, tmp78, tmp79)
tmp81 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp55 & xmask, other=0.0)
tmp82 = tmp18 + tmp81
tmp83 = tl.full(tmp82.shape, 0.0, tmp82.dtype)
tmp84 = tl.where(tmp55, tmp82, tmp83)
tmp85 = tl.where(tmp13, tmp84, tmp18)
tmp86 = tl.full(tmp85.shape, 0.0, tmp85.dtype)
tmp87 = tl.where(tmp54, tmp85, tmp86)
tmp88 = tl.where(tmp2, tmp87, tmp18)
tmp89 = tl.where(tmp2, tmp80, tmp88)
tmp90 = tl.where(tmp8, tmp53, tmp89)
tmp91 = tl.full(tmp90.shape, 0.0, tmp90.dtype)
tmp92 = tl.where(tmp10, tmp90, tmp91)
tmp93 = tmp13 & tmp10
tmp94 = tmp2 & tmp93
tmp95 = tmp13 & tmp94
tmp96 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp95 & xmask, other=0.0)
tmp97 = tmp18 + tmp96
tmp98 = tl.full(tmp97.shape, 0.0, tmp97.dtype)
tmp99 = tl.where(tmp95, tmp97, tmp98)
tmp100 = tl.where(tmp13, tmp99, tmp18)
tmp101 = tl.full(tmp100.shape, 0.0, tmp100.dtype)
tmp102 = tl.where(tmp94, tmp100, tmp101)
tmp103 = tl.where(tmp2, tmp102, tmp18)
tmp104 = tl.full(tmp103.shape, 0.0, tmp103.dtype)
tmp105 = tl.where(tmp93, tmp103, tmp104)
tmp106 = tl.where(tmp13, tmp105, tmp88)
tmp107 = tl.full(tmp106.shape, 0.0, tmp106.dtype)
tmp108 = tl.where(tmp10, tmp106, tmp107)
tmp109 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp93 & xmask, other=0.0)
tmp110 = tmp18 + tmp109
tmp111 = tl.full(tmp110.shape, 0.0, tmp110.dtype)
tmp112 = tl.where(tmp93, tmp110, tmp111)
tmp113 = tl.where(tmp13, tmp112, tmp18)
tmp114 = tl.full(tmp113.shape, 0.0, tmp113.dtype)
tmp115 = tl.where(tmp10, tmp113, tmp114)
tmp116 = tl.where(tmp2, tmp115, tmp18)
tmp117 = tl.where(tmp2, tmp108, tmp116)
tmp118 = tl.where(tmp2, tmp92, tmp117)
tmp119 = tl.full(tmp118.shape, 0.0, tmp118.dtype)
tmp120 = tl.where(tmp9, tmp118, tmp119)
tmp121 = tmp2 & tmp2
tmp122 = tmp8 & tmp121
tmp123 = tmp2 & tmp122
tmp124 = tmp13 & tmp123
tmp125 = tmp2 & tmp124
tmp126 = tmp13 & tmp125
tmp127 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp126 & xmask, other=0.0)
tmp128 = tmp18 + tmp127
tmp129 = tl.full(tmp128.shape, 0.0, tmp128.dtype)
tmp130 = tl.where(tmp126, tmp128, tmp129)
tmp131 = tl.where(tmp13, tmp130, tmp18)
tmp132 = tl.full(tmp131.shape, 0.0, tmp131.dtype)
tmp133 = tl.where(tmp125, tmp131, tmp132)
tmp134 = tl.where(tmp2, tmp133, tmp18)
tmp135 = tl.full(tmp134.shape, 0.0, tmp134.dtype)
tmp136 = tl.where(tmp124, tmp134, tmp135)
tmp137 = tmp2 & tmp123
tmp138 = tmp13 & tmp137
tmp139 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp138 & xmask, other=0.0)
tmp140 = tmp18 + tmp139
tmp141 = tl.full(tmp140.shape, 0.0, tmp140.dtype)
tmp142 = tl.where(tmp138, tmp140, tmp141)
tmp143 = tl.where(tmp13, tmp142, tmp18)
tmp144 = tl.full(tmp143.shape, 0.0, tmp143.dtype)
tmp145 = tl.where(tmp137, tmp143, tmp144)
tmp146 = tl.where(tmp2, tmp145, tmp18)
tmp147 = tl.where(tmp13, tmp136, tmp146)
tmp148 = tl.full(tmp147.shape, 0.0, tmp147.dtype)
tmp149 = tl.where(tmp123, tmp147, tmp148)
tmp150 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp124 & xmask, other=0.0)
tmp151 = tmp18 + tmp150
tmp152 = tl.full(tmp151.shape, 0.0, tmp151.dtype)
tmp153 = tl.where(tmp124, tmp151, tmp152)
tmp154 = tl.where(tmp13, tmp153, tmp18)
tmp155 = tl.full(tmp154.shape, 0.0, tmp154.dtype)
tmp156 = tl.where(tmp123, tmp154, tmp155)
tmp157 = tl.where(tmp2, tmp156, tmp18)
tmp158 = tl.where(tmp2, tmp149, tmp157)
tmp159 = tl.load(in_ptr1 + (12 + x0 + (4*x1)), tmp122 & xmask, other=0.0)
tmp160 = tmp158 + tmp159
tmp161 = tl.full(tmp160.shape, 0.0, tmp160.dtype)
tmp162 = tl.where(tmp122, tmp160, tmp161)
tmp163 = tmp2 & tmp121
tmp164 = tmp13 & tmp163
tmp165 = tmp2 & tmp164
tmp166 = tmp13 & tmp165
tmp167 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp166 & xmask, other=0.0)
tmp168 = tmp18 + tmp167
tmp169 = tl.full(tmp168.shape, 0.0, tmp168.dtype)
tmp170 = tl.where(tmp166, tmp168, tmp169)
tmp171 = tl.where(tmp13, tmp170, tmp18)
tmp172 = tl.full(tmp171.shape, 0.0, tmp171.dtype)
tmp173 = tl.where(tmp165, tmp171, tmp172)
tmp174 = tl.where(tmp2, tmp173, tmp18)
tmp175 = tl.full(tmp174.shape, 0.0, tmp174.dtype)
tmp176 = tl.where(tmp164, tmp174, tmp175)
tmp177 = tmp2 & tmp163
tmp178 = tmp13 & tmp177
tmp179 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp178 & xmask, other=0.0)
tmp180 = tmp18 + tmp179
tmp181 = tl.full(tmp180.shape, 0.0, tmp180.dtype)
tmp182 = tl.where(tmp178, tmp180, tmp181)
tmp183 = tl.where(tmp13, tmp182, tmp18)
tmp184 = tl.full(tmp183.shape, 0.0, tmp183.dtype)
tmp185 = tl.where(tmp177, tmp183, tmp184)
tmp186 = tl.where(tmp2, tmp185, tmp18)
tmp187 = tl.where(tmp13, tmp176, tmp186)
tmp188 = tl.full(tmp187.shape, 0.0, tmp187.dtype)
tmp189 = tl.where(tmp163, tmp187, tmp188)
tmp190 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp164 & xmask, other=0.0)
tmp191 = tmp18 + tmp190
tmp192 = tl.full(tmp191.shape, 0.0, tmp191.dtype)
tmp193 = tl.where(tmp164, tmp191, tmp192)
tmp194 = tl.where(tmp13, tmp193, tmp18)
tmp195 = tl.full(tmp194.shape, 0.0, tmp194.dtype)
tmp196 = tl.where(tmp163, tmp194, tmp195)
tmp197 = tl.where(tmp2, tmp196, tmp18)
tmp198 = tl.where(tmp2, tmp189, tmp197)
tmp199 = tl.where(tmp8, tmp162, tmp198)
tmp200 = tl.full(tmp199.shape, 0.0, tmp199.dtype)
tmp201 = tl.where(tmp121, tmp199, tmp200)
tmp202 = tmp13 & tmp121
tmp203 = tmp2 & tmp202
tmp204 = tmp13 & tmp203
tmp205 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp204 & xmask, other=0.0)
tmp206 = tmp18 + tmp205
tmp207 = tl.full(tmp206.shape, 0.0, tmp206.dtype)
tmp208 = tl.where(tmp204, tmp206, tmp207)
tmp209 = tl.where(tmp13, tmp208, tmp18)
tmp210 = tl.full(tmp209.shape, 0.0, tmp209.dtype)
tmp211 = tl.where(tmp203, tmp209, tmp210)
tmp212 = tl.where(tmp2, tmp211, tmp18)
tmp213 = tl.full(tmp212.shape, 0.0, tmp212.dtype)
tmp214 = tl.where(tmp202, tmp212, tmp213)
tmp215 = tl.where(tmp13, tmp214, tmp197)
tmp216 = tl.full(tmp215.shape, 0.0, tmp215.dtype)
tmp217 = tl.where(tmp121, tmp215, tmp216)
tmp218 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp202 & xmask, other=0.0)
tmp219 = tmp18 + tmp218
tmp220 = tl.full(tmp219.shape, 0.0, tmp219.dtype)
tmp221 = tl.where(tmp202, tmp219, tmp220)
tmp222 = tl.where(tmp13, tmp221, tmp18)
tmp223 = tl.full(tmp222.shape, 0.0, tmp222.dtype)
tmp224 = tl.where(tmp121, tmp222, tmp223)
tmp225 = tl.where(tmp2, tmp224, tmp18)
tmp226 = tl.where(tmp2, tmp217, tmp225)
tmp227 = tl.where(tmp2, tmp201, tmp226)
tmp228 = tl.where(tmp8, tmp120, tmp227)
tmp229 = tl.full(tmp228.shape, 0.0, tmp228.dtype)
tmp230 = tl.where(tmp2, tmp228, tmp229)
tmp231 = tl.load(in_ptr1 + (12 + x0 + (4*x1)), tmp9 & xmask, other=0.0)
tmp232 = tmp117 + tmp231
tmp233 = tl.full(tmp232.shape, 0.0, tmp232.dtype)
tmp234 = tl.where(tmp9, tmp232, tmp233)
tmp235 = tl.where(tmp8, tmp234, tmp226)
tmp236 = tl.full(tmp235.shape, 0.0, tmp235.dtype)
tmp237 = tl.where(tmp2, tmp235, tmp236)
tmp238 = tmp13 & tmp2
tmp239 = tmp2 & tmp238
tmp240 = tmp13 & tmp239
tmp241 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp240 & xmask, other=0.0)
tmp242 = tmp18 + tmp241
tmp243 = tl.full(tmp242.shape, 0.0, tmp242.dtype)
tmp244 = tl.where(tmp240, tmp242, tmp243)
tmp245 = tl.where(tmp13, tmp244, tmp18)
tmp246 = tl.full(tmp245.shape, 0.0, tmp245.dtype)
tmp247 = tl.where(tmp239, tmp245, tmp246)
tmp248 = tl.where(tmp2, tmp247, tmp18)
tmp249 = tl.full(tmp248.shape, 0.0, tmp248.dtype)
tmp250 = tl.where(tmp238, tmp248, tmp249)
tmp251 = tl.where(tmp13, tmp250, tmp225)
tmp252 = tl.full(tmp251.shape, 0.0, tmp251.dtype)
tmp253 = tl.where(tmp2, tmp251, tmp252)
tmp254 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp238 & xmask, other=0.0)
tmp255 = tmp18 + tmp254
tmp256 = tl.full(tmp255.shape, 0.0, tmp255.dtype)
tmp257 = tl.where(tmp238, tmp255, tmp256)
tmp258 = tl.where(tmp13, tmp257, tmp18)
tmp259 = tl.full(tmp258.shape, 0.0, tmp258.dtype)
tmp260 = tl.where(tmp2, tmp258, tmp259)
tmp261 = tl.where(tmp2, tmp260, tmp18)
tmp262 = tl.where(tmp2, tmp253, tmp261)
tmp263 = tl.where(tmp2, tmp237, tmp262)
tmp264 = tl.where(tmp2, tmp230, tmp263)
tmp265 = tl.where(tmp2, tmp3, tmp264)
tmp266 = tmp0 >= tmp1
tmp267 = tmp0 < tmp6
tmp268 = tmp266 & tmp267
tmp269 = tmp13 & tmp268
tmp270 = tmp2 & tmp269
tmp271 = tl.full([1], 12, tl.int64)
tmp272 = tmp4 >= tmp271
tmp273 = tmp272 & tmp270
tmp274 = tmp2 & tmp273
tmp275 = tmp272 & tmp274
tmp276 = tmp2 & tmp275
tmp277 = tmp4 >= tmp6
tmp278 = tmp4 < tmp271
tmp279 = tmp277 & tmp278
tmp280 = tmp279 & tmp276
tmp281 = tl.where(tmp279, tmp265, tmp265)
tmp282 = tl.full(tmp281.shape, 0.0, tmp281.dtype)
tmp283 = tl.where(tmp276, tmp281, tmp282)
tmp284 = tl.where(tmp2, tmp283, tmp265)
tmp285 = tl.load(in_ptr1 + (36 + x0 + (4*x1)), tmp275 & xmask, other=0.0)
tmp286 = tmp284 + tmp285
tmp287 = tl.full(tmp286.shape, 0.0, tmp286.dtype)
tmp288 = tl.where(tmp275, tmp286, tmp287)
tmp289 = tmp2 & tmp274
tmp290 = tmp279 & tmp289
tmp291 = tl.where(tmp289, tmp281, tmp282)
tmp292 = tl.where(tmp2, tmp291, tmp265)
tmp293 = tl.where(tmp272, tmp288, tmp292)
tmp294 = tl.full(tmp293.shape, 0.0, tmp293.dtype)
tmp295 = tl.where(tmp274, tmp293, tmp294)
tmp296 = tmp279 & tmp274
tmp297 = tl.where(tmp274, tmp281, tmp282)
tmp298 = tl.where(tmp2, tmp297, tmp265)
tmp299 = tl.where(tmp2, tmp295, tmp298)
tmp300 = tl.full(tmp299.shape, 0.0, tmp299.dtype)
tmp301 = tl.where(tmp273, tmp299, tmp300)
tmp302 = tmp2 & tmp270
tmp303 = tmp272 & tmp302
tmp304 = tmp2 & tmp303
tmp305 = tmp279 & tmp304
tmp306 = tl.where(tmp304, tmp281, tmp282)
tmp307 = tl.where(tmp2, tmp306, tmp265)
tmp308 = tl.load(in_ptr1 + (36 + x0 + (4*x1)), tmp303 & xmask, other=0.0)
tmp309 = tmp307 + tmp308
tmp310 = tl.full(tmp309.shape, 0.0, tmp309.dtype)
tmp311 = tl.where(tmp303, tmp309, tmp310)
tmp312 = tmp2 & tmp302
tmp313 = tmp279 & tmp312
tmp314 = tl.where(tmp312, tmp281, tmp282)
tmp315 = tl.where(tmp2, tmp314, tmp265)
tmp316 = tl.where(tmp272, tmp311, tmp315)
tmp317 = tl.full(tmp316.shape, 0.0, tmp316.dtype)
tmp318 = tl.where(tmp302, tmp316, tmp317)
tmp319 = tmp279 & tmp302
tmp320 = tl.where(tmp302, tmp281, tmp282)
tmp321 = tl.where(tmp2, tmp320, tmp265)
tmp322 = tl.where(tmp2, tmp318, tmp321)
tmp323 = tl.where(tmp272, tmp301, tmp322)
tmp324 = tl.full(tmp323.shape, 0.0, tmp323.dtype)
tmp325 = tl.where(tmp270, tmp323, tmp324)
tmp326 = tl.load(in_ptr1 + (36 + x0 + (4*x1)), tmp273 & xmask, other=0.0)
tmp327 = tmp298 + tmp326
tmp328 = tl.full(tmp327.shape, 0.0, tmp327.dtype)
tmp329 = tl.where(tmp273, tmp327, tmp328)
tmp330 = tl.where(tmp272, tmp329, tmp321)
tmp331 = tl.full(tmp330.shape, 0.0, tmp330.dtype)
tmp332 = tl.where(tmp270, tmp330, tmp331)
tmp333 = tmp279 & tmp270
tmp334 = tl.where(tmp270, tmp281, tmp282)
tmp335 = tl.where(tmp2, tmp334, tmp265)
tmp336 = tl.where(tmp2, tmp332, tmp335)
tmp337 = tl.where(tmp2, tmp325, tmp336)
tmp338 = tl.load(in_ptr1 + (48 + x0 + (4*x1)), tmp269 & xmask, other=0.0)
tmp339 = tmp337 + tmp338
tmp340 = tl.full(tmp339.shape, 0.0, tmp339.dtype)
tmp341 = tl.where(tmp269, tmp339, tmp340)
tmp342 = tmp2 & tmp268
tmp343 = tmp272 & tmp342
tmp344 = tmp2 & tmp343
tmp345 = tmp272 & tmp344
tmp346 = tmp2 & tmp345
tmp347 = tmp279 & tmp346
tmp348 = tl.where(tmp346, tmp281, tmp282)
tmp349 = tl.where(tmp2, tmp348, tmp265)
tmp350 = tl.load(in_ptr1 + (36 + x0 + (4*x1)), tmp345 & xmask, other=0.0)
tmp351 = tmp349 + tmp350
tmp352 = tl.full(tmp351.shape, 0.0, tmp351.dtype)
tmp353 = tl.where(tmp345, tmp351, tmp352)
tmp354 = tmp2 & tmp344
tmp355 = tmp279 & tmp354
tmp356 = tl.where(tmp354, tmp281, tmp282)
tmp357 = tl.where(tmp2, tmp356, tmp265)
tmp358 = tl.where(tmp272, tmp353, tmp357)
tmp359 = tl.full(tmp358.shape, 0.0, tmp358.dtype)
tmp360 = tl.where(tmp344, tmp358, tmp359)
tmp361 = tmp279 & tmp344
tmp362 = tl.where(tmp344, tmp281, tmp282)
tmp363 = tl.where(tmp2, tmp362, tmp265)
tmp364 = tl.where(tmp2, tmp360, tmp363)
tmp365 = tl.full(tmp364.shape, 0.0, tmp364.dtype)
tmp366 = tl.where(tmp343, tmp364, tmp365)
tmp367 = tmp2 & tmp342
tmp368 = tmp272 & tmp367
tmp369 = tmp2 & tmp368
tmp370 = tmp279 & tmp369
tmp371 = tl.where(tmp369, tmp281, tmp282)
tmp372 = tl.where(tmp2, tmp371, tmp265)
tmp373 = tl.load(in_ptr1 + (36 + x0 + (4*x1)), tmp368 & xmask, other=0.0)
tmp374 = tmp372 + tmp373
tmp375 = tl.full(tmp374.shape, 0.0, tmp374.dtype)
tmp376 = tl.where(tmp368, tmp374, tmp375)
tmp377 = tmp2 & tmp367
tmp378 = tmp279 & tmp377
tmp379 = tl.where(tmp377, tmp281, tmp282)
tmp380 = tl.where(tmp2, tmp379, tmp265)
tmp381 = tl.where(tmp272, tmp376, tmp380)
tmp382 = tl.full(tmp381.shape, 0.0, tmp381.dtype)
tmp383 = tl.where(tmp367, tmp381, tmp382)
tmp384 = tmp279 & tmp367
tmp385 = tl.where(tmp367, tmp281, tmp282)
tmp386 = tl.where(tmp2, tmp385, tmp265)
tmp387 = tl.where(tmp2, tmp383, tmp386)
tmp388 = tl.where(tmp272, tmp366, tmp387)
tmp389 = tl.full(tmp388.shape, 0.0, tmp388.dtype)
tmp390 = tl.where(tmp342, tmp388, tmp389)
tmp391 = tl.load(in_ptr1 + (36 + x0 + (4*x1)), tmp343 & xmask, other=0.0)
tmp392 = tmp363 + tmp391
tmp393 = tl.full(tmp392.shape, 0.0, tmp392.dtype)
tmp394 = tl.where(tmp343, tmp392, tmp393)
tmp395 = tl.where(tmp272, tmp394, tmp386)
tmp396 = tl.full(tmp395.shape, 0.0, tmp395.dtype)
tmp397 = tl.where(tmp342, tmp395, tmp396)
tmp398 = tmp279 & tmp342
tmp399 = tl.where(tmp342, tmp281, tmp282)
tmp400 = tl.where(tmp2, tmp399, tmp265)
tmp401 = tl.where(tmp2, tmp397, tmp400)
tmp402 = tl.where(tmp2, tmp390, tmp401)
tmp403 = tl.where(tmp13, tmp341, tmp402)
tmp404 = tl.full(tmp403.shape, 0.0, tmp403.dtype)
tmp405 = tl.where(tmp268, tmp403, tmp404)
tmp406 = tmp272 & tmp2
tmp407 = tmp2 & tmp406
tmp408 = tmp272 & tmp407
tmp409 = tmp2 & tmp408
tmp410 = tmp279 & tmp409
tmp411 = tl.where(tmp409, tmp281, tmp282)
tmp412 = tl.where(tmp2, tmp411, tmp265)
tmp413 = tl.load(in_ptr1 + (36 + x0 + (4*x1)), tmp408 & xmask, other=0.0)
tmp414 = tmp412 + tmp413
tmp415 = tl.full(tmp414.shape, 0.0, tmp414.dtype)
tmp416 = tl.where(tmp408, tmp414, tmp415)
tmp417 = tmp2 & tmp407
tmp418 = tmp279 & tmp417
tmp419 = tl.where(tmp417, tmp281, tmp282)
tmp420 = tl.where(tmp2, tmp419, tmp265)
tmp421 = tl.where(tmp272, tmp416, tmp420)
tmp422 = tl.full(tmp421.shape, 0.0, tmp421.dtype)
tmp423 = tl.where(tmp407, tmp421, tmp422)
tmp424 = tmp279 & tmp407
tmp425 = tl.where(tmp407, tmp281, tmp282)
tmp426 = tl.where(tmp2, tmp425, tmp265)
tmp427 = tl.where(tmp2, tmp423, tmp426)
tmp428 = tl.full(tmp427.shape, 0.0, tmp427.dtype)
tmp429 = tl.where(tmp406, tmp427, tmp428)
tmp430 = tmp272 & tmp121
tmp431 = tmp2 & tmp430
tmp432 = tmp279 & tmp431
tmp433 = tl.where(tmp431, tmp281, tmp282)
tmp434 = tl.where(tmp2, tmp433, tmp265)
tmp435 = tl.load(in_ptr1 + (36 + x0 + (4*x1)), tmp430 & xmask, other=0.0)
tmp436 = tmp434 + tmp435
tmp437 = tl.full(tmp436.shape, 0.0, tmp436.dtype)
tmp438 = tl.where(tmp430, tmp436, tmp437)
tmp439 = tmp279 & tmp163
tmp440 = tl.where(tmp163, tmp281, tmp282)
tmp441 = tl.where(tmp2, tmp440, tmp265)
tmp442 = tl.where(tmp272, tmp438, tmp441)
tmp443 = tl.full(tmp442.shape, 0.0, tmp442.dtype)
tmp444 = tl.where(tmp121, tmp442, tmp443)
tmp445 = tmp279 & tmp121
tmp446 = tl.where(tmp121, tmp281, tmp282)
tmp447 = tl.where(tmp2, tmp446, tmp265)
tmp448 = tl.where(tmp2, tmp444, tmp447)
tmp449 = tl.where(tmp272, tmp429, tmp448)
tmp450 = tl.full(tmp449.shape, 0.0, tmp449.dtype)
tmp451 = tl.where(tmp2, tmp449, tmp450)
tmp452 = tl.load(in_ptr1 + (36 + x0 + (4*x1)), tmp406 & xmask, other=0.0)
tmp453 = tmp426 + tmp452
tmp454 = tl.full(tmp453.shape, 0.0, tmp453.dtype)
tmp455 = tl.where(tmp406, tmp453, tmp454)
tmp456 = tl.where(tmp272, tmp455, tmp447)
tmp457 = tl.full(tmp456.shape, 0.0, tmp456.dtype)
tmp458 = tl.where(tmp2, tmp456, tmp457)
tmp459 = tmp279 & tmp2
tmp460 = tl.where(tmp2, tmp281, tmp282)
tmp461 = tl.where(tmp2, tmp460, tmp265)
tmp462 = tl.where(tmp2, tmp458, tmp461)
tmp463 = tl.where(tmp2, tmp451, tmp462)
tmp464 = tl.where(tmp268, tmp405, tmp463)
tl.store(in_out_ptr0 + (x2), tmp464, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_0/inductor_cache/6u/c6ueeomzrz3xolldfp7disiqma6bv2ommvisq6xyzxkbax272luc.py
# Topologically Sorted Source Nodes: [iadd_6], Original ATen: [aten.add]
# Source node to ATen node mapping:
# iadd_6 => add_6
# Graph fragment:
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_100, %select_13), kwargs = {})
# %slice_scatter_default_24 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_12, %add_6, 1, 8, 12), kwargs = {})
# %slice_scatter_default_26 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_13, %slice_103, 1, 8, 12), kwargs = {})
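# The fragment above is the lowering of an in-place `+=` ("iadd_6") on a
# column slice, written back with slice_scatter.  A minimal eager-mode sketch
# of that pattern -- a hypothetical helper with illustrative shapes, not the
# traced model's real tensors:
def _sketch_slice_iadd_6(base, delta):
    # base: (N, 16) buffer; delta: (N, 4) values accumulated into columns 8:12.
    src = base[:, 8:12] + delta                        # %add_6
    return torch.slice_scatter(base, src, 1, 8, 12)    # %slice_scatter_default_24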
triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 43, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp200 = tl.load(in_ptr0 + (64 + x2), xmask)
tmp0 = x0
tmp1 = tl.full([1], 8, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 12, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = 4 + x1
tmp7 = tl.full([1], 4, tl.int64)
tmp8 = tmp6 >= tmp7
tmp9 = tmp6 < tmp1
tmp10 = tmp8 & tmp9
tmp11 = tmp10 & tmp5
tmp12 = tmp0 >= tmp7
tmp13 = tmp0 < tmp1
tmp14 = tmp12 & tmp13
tmp15 = tmp14 & tmp11
tmp16 = tmp10 & tmp15
tmp17 = tmp14 & tmp16
tmp18 = tmp10 & tmp17
tmp19 = tmp0 < tmp7
tmp20 = tmp19 & tmp18
tmp21 = tl.load(in_ptr0 + (64 + x2), tmp20 & xmask, other=0.0)
tmp22 = tl.load(in_ptr0 + (64 + x2), tmp18 & xmask, other=0.0)
tmp23 = tl.where(tmp19, tmp21, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp18, tmp23, tmp24)
tmp26 = tl.load(in_ptr0 + (64 + x2), tmp17 & xmask, other=0.0)
tmp27 = tl.where(tmp10, tmp25, tmp26)
tmp28 = tl.load(in_ptr1 + (76 + x0 + (4*x1)), tmp17 & xmask, other=0.0)
tmp29 = tmp27 + tmp28
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp17, tmp29, tmp30)
tmp32 = tmp10 & tmp16
tmp33 = tmp19 & tmp32
tmp34 = tl.load(in_ptr0 + (64 + x2), tmp33 & xmask, other=0.0)
tmp35 = tl.load(in_ptr0 + (64 + x2), tmp32 & xmask, other=0.0)
tmp36 = tl.where(tmp19, tmp34, tmp35)
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp32, tmp36, tmp37)
tmp39 = tl.load(in_ptr0 + (64 + x2), tmp16 & xmask, other=0.0)
tmp40 = tl.where(tmp10, tmp38, tmp39)
tmp41 = tl.where(tmp14, tmp31, tmp40)
tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
tmp43 = tl.where(tmp16, tmp41, tmp42)
tmp44 = tmp19 & tmp16
tmp45 = tl.load(in_ptr0 + (64 + x2), tmp44 & xmask, other=0.0)
tmp46 = tl.where(tmp19, tmp45, tmp39)
tmp47 = tl.full(tmp46.shape, 0.0, tmp46.dtype)
tmp48 = tl.where(tmp16, tmp46, tmp47)
tmp49 = tl.load(in_ptr0 + (64 + x2), tmp15 & xmask, other=0.0)
tmp50 = tl.where(tmp10, tmp48, tmp49)
tmp51 = tl.where(tmp10, tmp43, tmp50)
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp15, tmp51, tmp52)
tmp54 = tmp10 & tmp11
tmp55 = tmp14 & tmp54
tmp56 = tmp10 & tmp55
tmp57 = tmp19 & tmp56
tmp58 = tl.load(in_ptr0 + (64 + x2), tmp57 & xmask, other=0.0)
tmp59 = tl.load(in_ptr0 + (64 + x2), tmp56 & xmask, other=0.0)
tmp60 = tl.where(tmp19, tmp58, tmp59)
tmp61 = tl.full(tmp60.shape, 0.0, tmp60.dtype)
tmp62 = tl.where(tmp56, tmp60, tmp61)
tmp63 = tl.load(in_ptr0 + (64 + x2), tmp55 & xmask, other=0.0)
tmp64 = tl.where(tmp10, tmp62, tmp63)
tmp65 = tl.load(in_ptr1 + (76 + x0 + (4*x1)), tmp55 & xmask, other=0.0)
tmp66 = tmp64 + tmp65
tmp67 = tl.full(tmp66.shape, 0.0, tmp66.dtype)
tmp68 = tl.where(tmp55, tmp66, tmp67)
tmp69 = tmp10 & tmp54
tmp70 = tmp19 & tmp69
tmp71 = tl.load(in_ptr0 + (64 + x2), tmp70 & xmask, other=0.0)
tmp72 = tl.load(in_ptr0 + (64 + x2), tmp69 & xmask, other=0.0)
tmp73 = tl.where(tmp19, tmp71, tmp72)
tmp74 = tl.full(tmp73.shape, 0.0, tmp73.dtype)
tmp75 = tl.where(tmp69, tmp73, tmp74)
tmp76 = tl.load(in_ptr0 + (64 + x2), tmp54 & xmask, other=0.0)
tmp77 = tl.where(tmp10, tmp75, tmp76)
tmp78 = tl.where(tmp14, tmp68, tmp77)
tmp79 = tl.full(tmp78.shape, 0.0, tmp78.dtype)
tmp80 = tl.where(tmp54, tmp78, tmp79)
tmp81 = tmp19 & tmp54
tmp82 = tl.load(in_ptr0 + (64 + x2), tmp81 & xmask, other=0.0)
tmp83 = tl.where(tmp19, tmp82, tmp76)
tmp84 = tl.full(tmp83.shape, 0.0, tmp83.dtype)
tmp85 = tl.where(tmp54, tmp83, tmp84)
tmp86 = tl.load(in_ptr0 + (64 + x2), tmp11 & xmask, other=0.0)
tmp87 = tl.where(tmp10, tmp85, tmp86)
tmp88 = tl.where(tmp10, tmp80, tmp87)
tmp89 = tl.where(tmp14, tmp53, tmp88)
tmp90 = tl.full(tmp89.shape, 0.0, tmp89.dtype)
tmp91 = tl.where(tmp11, tmp89, tmp90)
tmp92 = tl.load(in_ptr1 + (76 + x0 + (4*x1)), tmp15 & xmask, other=0.0)
tmp93 = tmp50 + tmp92
tmp94 = tl.full(tmp93.shape, 0.0, tmp93.dtype)
tmp95 = tl.where(tmp15, tmp93, tmp94)
tmp96 = tl.where(tmp14, tmp95, tmp87)
tmp97 = tl.full(tmp96.shape, 0.0, tmp96.dtype)
tmp98 = tl.where(tmp11, tmp96, tmp97)
tmp99 = tmp19 & tmp11
tmp100 = tl.load(in_ptr0 + (64 + x2), tmp99 & xmask, other=0.0)
tmp101 = tl.where(tmp19, tmp100, tmp86)
tmp102 = tl.full(tmp101.shape, 0.0, tmp101.dtype)
tmp103 = tl.where(tmp11, tmp101, tmp102)
tmp104 = tl.load(in_ptr0 + (64 + x2), tmp5 & xmask, other=0.0)
tmp105 = tl.where(tmp10, tmp103, tmp104)
tmp106 = tl.where(tmp10, tmp98, tmp105)
tmp107 = tl.where(tmp10, tmp91, tmp106)
tmp108 = tl.load(in_ptr1 + (88 + x0 + (4*x1)), tmp5 & xmask, other=0.0)
tmp109 = tmp107 + tmp108
tmp110 = tl.full(tmp109.shape, 0.0, tmp109.dtype)
tmp111 = tl.where(tmp5, tmp109, tmp110)
tmp112 = tmp14 & tmp10
tmp113 = tmp10 & tmp112
tmp114 = tmp14 & tmp113
tmp115 = tmp10 & tmp114
tmp116 = tmp19 & tmp115
tmp117 = tl.load(in_ptr0 + (64 + x2), tmp116 & xmask, other=0.0)
tmp118 = tl.load(in_ptr0 + (64 + x2), tmp115 & xmask, other=0.0)
tmp119 = tl.where(tmp19, tmp117, tmp118)
tmp120 = tl.full(tmp119.shape, 0.0, tmp119.dtype)
tmp121 = tl.where(tmp115, tmp119, tmp120)
tmp122 = tl.load(in_ptr0 + (64 + x2), tmp114 & xmask, other=0.0)
tmp123 = tl.where(tmp10, tmp121, tmp122)
tmp124 = tl.load(in_ptr1 + (76 + x0 + (4*x1)), tmp114 & xmask, other=0.0)
tmp125 = tmp123 + tmp124
tmp126 = tl.full(tmp125.shape, 0.0, tmp125.dtype)
tmp127 = tl.where(tmp114, tmp125, tmp126)
tmp128 = tmp10 & tmp113
tmp129 = tmp19 & tmp128
tmp130 = tl.load(in_ptr0 + (64 + x2), tmp129 & xmask, other=0.0)
tmp131 = tl.load(in_ptr0 + (64 + x2), tmp128 & xmask, other=0.0)
tmp132 = tl.where(tmp19, tmp130, tmp131)
tmp133 = tl.full(tmp132.shape, 0.0, tmp132.dtype)
tmp134 = tl.where(tmp128, tmp132, tmp133)
tmp135 = tl.load(in_ptr0 + (64 + x2), tmp113 & xmask, other=0.0)
tmp136 = tl.where(tmp10, tmp134, tmp135)
tmp137 = tl.where(tmp14, tmp127, tmp136)
tmp138 = tl.full(tmp137.shape, 0.0, tmp137.dtype)
tmp139 = tl.where(tmp113, tmp137, tmp138)
tmp140 = tmp19 & tmp113
tmp141 = tl.load(in_ptr0 + (64 + x2), tmp140 & xmask, other=0.0)
tmp142 = tl.where(tmp19, tmp141, tmp135)
tmp143 = tl.full(tmp142.shape, 0.0, tmp142.dtype)
tmp144 = tl.where(tmp113, tmp142, tmp143)
tmp145 = tl.load(in_ptr0 + (64 + x2), tmp112 & xmask, other=0.0)
tmp146 = tl.where(tmp10, tmp144, tmp145)
tmp147 = tl.where(tmp10, tmp139, tmp146)
tmp148 = tl.full(tmp147.shape, 0.0, tmp147.dtype)
tmp149 = tl.where(tmp112, tmp147, tmp148)
tmp150 = tmp10 & tmp10
tmp151 = tmp14 & tmp150
tmp152 = tmp10 & tmp151
tmp153 = tmp19 & tmp152
tmp154 = tl.load(in_ptr0 + (64 + x2), tmp153 & xmask, other=0.0)
tmp155 = tl.load(in_ptr0 + (64 + x2), tmp152 & xmask, other=0.0)
tmp156 = tl.where(tmp19, tmp154, tmp155)
tmp157 = tl.full(tmp156.shape, 0.0, tmp156.dtype)
tmp158 = tl.where(tmp152, tmp156, tmp157)
tmp159 = tl.load(in_ptr0 + (64 + x2), tmp151 & xmask, other=0.0)
tmp160 = tl.where(tmp10, tmp158, tmp159)
tmp161 = tl.load(in_ptr1 + (76 + x0 + (4*x1)), tmp151 & xmask, other=0.0)
tmp162 = tmp160 + tmp161
tmp163 = tl.full(tmp162.shape, 0.0, tmp162.dtype)
tmp164 = tl.where(tmp151, tmp162, tmp163)
tmp165 = tmp10 & tmp150
tmp166 = tmp19 & tmp165
tmp167 = tl.load(in_ptr0 + (64 + x2), tmp166 & xmask, other=0.0)
tmp168 = tl.load(in_ptr0 + (64 + x2), tmp165 & xmask, other=0.0)
tmp169 = tl.where(tmp19, tmp167, tmp168)
tmp170 = tl.full(tmp169.shape, 0.0, tmp169.dtype)
tmp171 = tl.where(tmp165, tmp169, tmp170)
tmp172 = tl.load(in_ptr0 + (64 + x2), tmp150 & xmask, other=0.0)
tmp173 = tl.where(tmp10, tmp171, tmp172)
tmp174 = tl.where(tmp14, tmp164, tmp173)
tmp175 = tl.full(tmp174.shape, 0.0, tmp174.dtype)
tmp176 = tl.where(tmp150, tmp174, tmp175)
tmp177 = tmp19 & tmp150
tmp178 = tl.load(in_ptr0 + (64 + x2), tmp177 & xmask, other=0.0)
tmp179 = tl.where(tmp19, tmp178, tmp172)
tmp180 = tl.full(tmp179.shape, 0.0, tmp179.dtype)
tmp181 = tl.where(tmp150, tmp179, tmp180)
tmp182 = tl.load(in_ptr0 + (64 + x2), tmp10 & xmask, other=0.0)
tmp183 = tl.where(tmp10, tmp181, tmp182)
tmp184 = tl.where(tmp10, tmp176, tmp183)
tmp185 = tl.where(tmp14, tmp149, tmp184)
tmp186 = tl.full(tmp185.shape, 0.0, tmp185.dtype)
tmp187 = tl.where(tmp10, tmp185, tmp186)
tmp188 = tl.load(in_ptr1 + (76 + x0 + (4*x1)), tmp112 & xmask, other=0.0)
tmp189 = tmp146 + tmp188
tmp190 = tl.full(tmp189.shape, 0.0, tmp189.dtype)
tmp191 = tl.where(tmp112, tmp189, tmp190)
tmp192 = tl.where(tmp14, tmp191, tmp183)
tmp193 = tl.full(tmp192.shape, 0.0, tmp192.dtype)
tmp194 = tl.where(tmp10, tmp192, tmp193)
tmp195 = tmp19 & tmp10
tmp196 = tl.load(in_ptr0 + (64 + x2), tmp195 & xmask, other=0.0)
tmp197 = tl.where(tmp19, tmp196, tmp182)
tmp198 = tl.full(tmp197.shape, 0.0, tmp197.dtype)
tmp199 = tl.where(tmp10, tmp197, tmp198)
tmp201 = tl.where(tmp10, tmp199, tmp200)
tmp202 = tl.where(tmp10, tmp194, tmp201)
tmp203 = tl.where(tmp10, tmp187, tmp202)
tmp204 = tl.where(tmp5, tmp111, tmp203)
tmp205 = tl.where(tmp10, tmp204, tmp107)
tmp206 = tl.full(tmp205.shape, 0.0, tmp205.dtype)
tmp207 = tl.where(tmp5, tmp205, tmp206)
tmp208 = tl.where(tmp10, tmp204, tmp203)
tmp209 = tl.where(tmp5, tmp207, tmp208)
tl.store(out_ptr0 + (x2), tmp204, xmask)
tl.store(out_ptr1 + (x2), tmp209, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_0/inductor_cache/ft/cftz6fore27oifq7z2wzi27lwhb6xj2rstig24xl2zbr4ddn3vvx.py
# Topologically Sorted Source Nodes: [iadd_5], Original ATen: [aten.add]
# Source node to ATen node mapping:
# iadd_5 => add_5
# Graph fragment:
# %slice_scatter_default_18 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_9, %slice_71, 1, 0, 4), kwargs = {})
# %slice_scatter_default_19 : [num_users=4] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_17, %slice_scatter_default_18, 0, 4, 8), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_84, %select_11), kwargs = {})
# %slice_scatter_default_20 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_10, %add_5, 1, 4, 8), kwargs = {})
# %slice_scatter_default_21 : [num_users=5] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_19, %slice_scatter_default_20, 0, 4, 8), kwargs = {})
# %slice_scatter_default_22 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_11, %slice_87, 1, 4, 8), kwargs = {})
# %slice_scatter_default_23 : [num_users=4] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_21, %slice_scatter_default_22, 0, 4, 8), kwargs = {})
# %slice_scatter_default_25 : [num_users=5] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_23, %slice_scatter_default_24, 0, 4, 8), kwargs = {})
# %slice_scatter_default_27 : [num_users=4] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_25, %slice_scatter_default_26, 0, 4, 8), kwargs = {})
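# iadd_5 accumulates a delta into a 4-wide column window of the 4:8 row block
# and scatters the result back at both nesting levels, which is what the chain
# of slice_scatter fragments above expresses.  A hypothetical eager-mode
# sketch of that double scatter (illustrative shapes only):
def _sketch_slice_iadd_5(buf, delta):
    # buf: (16, 16) buffer; delta: (4, 4) values for rows 4:8, columns 4:8.
    rows = buf[4:8]
    cols = rows[:, 4:8] + delta                        # %add_5
    rows = torch.slice_scatter(rows, cols, 1, 4, 8)    # %slice_scatter_default_20
    return torch.slice_scatter(buf, rows, 0, 4, 8)     # %slice_scatter_default_21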
triton_poi_fused_add_4 = async_compile.triton('triton_poi_fused_add_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 23, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16)
x2 = xindex
x0 = xindex % 16
tmp101 = tl.load(in_out_ptr0 + (x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 8, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + ((-64) + x2), tmp5 & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + ((-64) + x2), tmp5 & xmask, other=0.0)
tmp8 = x0
tmp9 = tmp8 >= tmp1
tmp10 = tmp8 < tmp3
tmp11 = tmp9 & tmp10
tmp12 = tmp11 & tmp5
tmp13 = tmp5 & tmp12
tmp14 = tmp11 & tmp13
tmp15 = tmp5 & tmp14
tmp16 = tmp8 < tmp1
tmp17 = tmp16 & tmp15
tmp18 = tl.load(in_out_ptr0 + (x2), tmp17 & xmask, other=0.0)
tmp19 = tl.load(in_out_ptr0 + (x2), tmp15 & xmask, other=0.0)
tmp20 = tl.where(tmp16, tmp18, tmp19)
tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype)
tmp22 = tl.where(tmp15, tmp20, tmp21)
tmp23 = tl.load(in_out_ptr0 + (x2), tmp14 & xmask, other=0.0)
tmp24 = tl.where(tmp5, tmp22, tmp23)
tmp25 = tl.load(in_ptr2 + (60 + x0 + (4*x1)), tmp14 & xmask, other=0.0)
tmp26 = tmp24 + tmp25
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp14, tmp26, tmp27)
tmp29 = tmp5 & tmp13
tmp30 = tmp16 & tmp29
tmp31 = tl.load(in_out_ptr0 + (x2), tmp30 & xmask, other=0.0)
tmp32 = tl.load(in_out_ptr0 + (x2), tmp29 & xmask, other=0.0)
tmp33 = tl.where(tmp16, tmp31, tmp32)
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp29, tmp33, tmp34)
tmp36 = tl.load(in_out_ptr0 + (x2), tmp13 & xmask, other=0.0)
tmp37 = tl.where(tmp5, tmp35, tmp36)
tmp38 = tl.where(tmp11, tmp28, tmp37)
tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
tmp40 = tl.where(tmp13, tmp38, tmp39)
tmp41 = tmp16 & tmp13
tmp42 = tl.load(in_out_ptr0 + (x2), tmp41 & xmask, other=0.0)
tmp43 = tl.where(tmp16, tmp42, tmp36)
tmp44 = tl.full(tmp43.shape, 0.0, tmp43.dtype)
tmp45 = tl.where(tmp13, tmp43, tmp44)
tmp46 = tl.load(in_out_ptr0 + (x2), tmp12 & xmask, other=0.0)
tmp47 = tl.where(tmp5, tmp45, tmp46)
tmp48 = tl.where(tmp5, tmp40, tmp47)
tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype)
tmp50 = tl.where(tmp12, tmp48, tmp49)
tmp51 = tmp5 & tmp5
tmp52 = tmp11 & tmp51
tmp53 = tmp5 & tmp52
tmp54 = tmp16 & tmp53
tmp55 = tl.load(in_out_ptr0 + (x2), tmp54 & xmask, other=0.0)
tmp56 = tl.load(in_out_ptr0 + (x2), tmp53 & xmask, other=0.0)
tmp57 = tl.where(tmp16, tmp55, tmp56)
tmp58 = tl.full(tmp57.shape, 0.0, tmp57.dtype)
tmp59 = tl.where(tmp53, tmp57, tmp58)
tmp60 = tl.load(in_out_ptr0 + (x2), tmp52 & xmask, other=0.0)
tmp61 = tl.where(tmp5, tmp59, tmp60)
tmp62 = tl.load(in_ptr2 + (60 + x0 + (4*x1)), tmp52 & xmask, other=0.0)
tmp63 = tmp61 + tmp62
tmp64 = tl.full(tmp63.shape, 0.0, tmp63.dtype)
tmp65 = tl.where(tmp52, tmp63, tmp64)
tmp66 = tmp5 & tmp51
tmp67 = tmp16 & tmp66
tmp68 = tl.load(in_out_ptr0 + (x2), tmp67 & xmask, other=0.0)
tmp69 = tl.load(in_out_ptr0 + (x2), tmp66 & xmask, other=0.0)
tmp70 = tl.where(tmp16, tmp68, tmp69)
tmp71 = tl.full(tmp70.shape, 0.0, tmp70.dtype)
tmp72 = tl.where(tmp66, tmp70, tmp71)
tmp73 = tl.load(in_out_ptr0 + (x2), tmp51 & xmask, other=0.0)
tmp74 = tl.where(tmp5, tmp72, tmp73)
tmp75 = tl.where(tmp11, tmp65, tmp74)
tmp76 = tl.full(tmp75.shape, 0.0, tmp75.dtype)
tmp77 = tl.where(tmp51, tmp75, tmp76)
tmp78 = tmp16 & tmp51
tmp79 = tl.load(in_out_ptr0 + (x2), tmp78 & xmask, other=0.0)
tmp80 = tl.where(tmp16, tmp79, tmp73)
tmp81 = tl.full(tmp80.shape, 0.0, tmp80.dtype)
tmp82 = tl.where(tmp51, tmp80, tmp81)
tmp83 = tl.load(in_out_ptr0 + (x2), tmp5 & xmask, other=0.0)
tmp84 = tl.where(tmp5, tmp82, tmp83)
tmp85 = tl.where(tmp5, tmp77, tmp84)
tmp86 = tl.where(tmp11, tmp50, tmp85)
tmp87 = tl.full(tmp86.shape, 0.0, tmp86.dtype)
tmp88 = tl.where(tmp5, tmp86, tmp87)
tmp89 = tl.load(in_ptr2 + (60 + x0 + (4*x1)), tmp12 & xmask, other=0.0)
tmp90 = tmp47 + tmp89
tmp91 = tl.full(tmp90.shape, 0.0, tmp90.dtype)
tmp92 = tl.where(tmp12, tmp90, tmp91)
tmp93 = tl.where(tmp11, tmp92, tmp84)
tmp94 = tl.full(tmp93.shape, 0.0, tmp93.dtype)
tmp95 = tl.where(tmp5, tmp93, tmp94)
tmp96 = tmp16 & tmp5
tmp97 = tl.load(in_out_ptr0 + (x2), tmp96 & xmask, other=0.0)
tmp98 = tl.where(tmp16, tmp97, tmp83)
tmp99 = tl.full(tmp98.shape, 0.0, tmp98.dtype)
tmp100 = tl.where(tmp5, tmp98, tmp99)
tmp102 = tl.where(tmp5, tmp100, tmp101)
tmp103 = tl.where(tmp5, tmp95, tmp102)
tmp104 = tl.where(tmp5, tmp88, tmp103)
tmp105 = tl.where(tmp5, tmp7, tmp104)
tmp106 = tl.where(tmp5, tmp6, tmp105)
tl.store(in_out_ptr0 + (x2), tmp106, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_0/inductor_cache/3f/c3fjcanlofb532fzzefseogkvlol76s72bgjgl7ny5uerbji6frd.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %slice_scatter_default_34 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_17, %slice_135, 1, 0, 4), kwargs = {})
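# No arithmetic here: the kernel only materialises a slice_scatter that copies
# one intermediate's first four columns over another's.  A hypothetical
# eager-mode sketch (illustrative shapes only):
def _sketch_slice_copy(dst, src):
    # dst: (N, 16); src: (N, 4) -- overwrite dst[:, 0:4] with src.
    return torch.slice_scatter(dst, src, 1, 0, 4)      # %slice_scatter_default_34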
triton_poi_fused_5 = async_compile.triton('triton_poi_fused_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 62, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp287 = tl.load(in_ptr0 + (128 + x2), xmask)
tmp0 = x0
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = 8 + x1
tmp4 = tl.full([1], 8, tl.int64)
tmp5 = tmp3 >= tmp4
tmp6 = tl.full([1], 12, tl.int64)
tmp7 = tmp3 < tmp6
tmp8 = tmp5 & tmp7
tmp9 = tmp8 & tmp2
tmp10 = tmp2 & tmp9
tmp11 = tmp3 >= tmp1
tmp12 = tmp3 < tmp4
tmp13 = tmp11 & tmp12
tmp14 = tmp13 & tmp10
tmp15 = tmp0 >= tmp6
tmp16 = tmp15 & tmp14
tmp17 = tmp13 & tmp16
tmp18 = tmp15 & tmp17
tmp19 = tl.load(in_ptr0 + (128 + x2), tmp18 & xmask, other=0.0)
tmp20 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp18 & xmask, other=0.0)
tmp21 = tmp19 + tmp20
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp18, tmp21, tmp22)
tmp24 = tl.load(in_ptr0 + (128 + x2), tmp17 & xmask, other=0.0)
tmp25 = tl.where(tmp15, tmp23, tmp24)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp17, tmp25, tmp26)
tmp28 = tl.load(in_ptr0 + (128 + x2), tmp16 & xmask, other=0.0)
tmp29 = tl.where(tmp13, tmp27, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp16, tmp29, tmp30)
tmp32 = tmp13 & tmp14
tmp33 = tmp15 & tmp32
tmp34 = tl.load(in_ptr0 + (128 + x2), tmp33 & xmask, other=0.0)
tmp35 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp33 & xmask, other=0.0)
tmp36 = tmp34 + tmp35
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp33, tmp36, tmp37)
tmp39 = tl.load(in_ptr0 + (128 + x2), tmp32 & xmask, other=0.0)
tmp40 = tl.where(tmp15, tmp38, tmp39)
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp32, tmp40, tmp41)
tmp43 = tl.load(in_ptr0 + (128 + x2), tmp14 & xmask, other=0.0)
tmp44 = tl.where(tmp13, tmp42, tmp43)
tmp45 = tl.where(tmp15, tmp31, tmp44)
tmp46 = tl.full(tmp45.shape, 0.0, tmp45.dtype)
tmp47 = tl.where(tmp14, tmp45, tmp46)
tmp48 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp16 & xmask, other=0.0)
tmp49 = tmp28 + tmp48
tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
tmp51 = tl.where(tmp16, tmp49, tmp50)
tmp52 = tl.where(tmp15, tmp51, tmp43)
tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype)
tmp54 = tl.where(tmp14, tmp52, tmp53)
tmp55 = tl.load(in_ptr0 + (128 + x2), tmp10 & xmask, other=0.0)
tmp56 = tl.where(tmp13, tmp54, tmp55)
tmp57 = tl.where(tmp13, tmp47, tmp56)
tmp58 = tl.load(in_ptr1 + (128 + x0 + (4*x1)), tmp10 & xmask, other=0.0)
tmp59 = tmp57 + tmp58
tmp60 = tl.full(tmp59.shape, 0.0, tmp59.dtype)
tmp61 = tl.where(tmp10, tmp59, tmp60)
tmp62 = tmp13 & tmp9
tmp63 = tmp15 & tmp62
tmp64 = tmp13 & tmp63
tmp65 = tmp15 & tmp64
tmp66 = tl.load(in_ptr0 + (128 + x2), tmp65 & xmask, other=0.0)
tmp67 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp65 & xmask, other=0.0)
tmp68 = tmp66 + tmp67
tmp69 = tl.full(tmp68.shape, 0.0, tmp68.dtype)
tmp70 = tl.where(tmp65, tmp68, tmp69)
tmp71 = tl.load(in_ptr0 + (128 + x2), tmp64 & xmask, other=0.0)
tmp72 = tl.where(tmp15, tmp70, tmp71)
tmp73 = tl.full(tmp72.shape, 0.0, tmp72.dtype)
tmp74 = tl.where(tmp64, tmp72, tmp73)
tmp75 = tl.load(in_ptr0 + (128 + x2), tmp63 & xmask, other=0.0)
tmp76 = tl.where(tmp13, tmp74, tmp75)
tmp77 = tl.full(tmp76.shape, 0.0, tmp76.dtype)
tmp78 = tl.where(tmp63, tmp76, tmp77)
tmp79 = tmp13 & tmp62
tmp80 = tmp15 & tmp79
tmp81 = tl.load(in_ptr0 + (128 + x2), tmp80 & xmask, other=0.0)
tmp82 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp80 & xmask, other=0.0)
tmp83 = tmp81 + tmp82
tmp84 = tl.full(tmp83.shape, 0.0, tmp83.dtype)
tmp85 = tl.where(tmp80, tmp83, tmp84)
tmp86 = tl.load(in_ptr0 + (128 + x2), tmp79 & xmask, other=0.0)
tmp87 = tl.where(tmp15, tmp85, tmp86)
tmp88 = tl.full(tmp87.shape, 0.0, tmp87.dtype)
tmp89 = tl.where(tmp79, tmp87, tmp88)
tmp90 = tl.load(in_ptr0 + (128 + x2), tmp62 & xmask, other=0.0)
tmp91 = tl.where(tmp13, tmp89, tmp90)
tmp92 = tl.where(tmp15, tmp78, tmp91)
tmp93 = tl.full(tmp92.shape, 0.0, tmp92.dtype)
tmp94 = tl.where(tmp62, tmp92, tmp93)
tmp95 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp63 & xmask, other=0.0)
tmp96 = tmp75 + tmp95
tmp97 = tl.full(tmp96.shape, 0.0, tmp96.dtype)
tmp98 = tl.where(tmp63, tmp96, tmp97)
tmp99 = tl.where(tmp15, tmp98, tmp90)
tmp100 = tl.full(tmp99.shape, 0.0, tmp99.dtype)
tmp101 = tl.where(tmp62, tmp99, tmp100)
tmp102 = tl.load(in_ptr0 + (128 + x2), tmp9 & xmask, other=0.0)
tmp103 = tl.where(tmp13, tmp101, tmp102)
tmp104 = tl.where(tmp13, tmp94, tmp103)
tmp105 = tl.where(tmp2, tmp61, tmp104)
tmp106 = tl.full(tmp105.shape, 0.0, tmp105.dtype)
tmp107 = tl.where(tmp9, tmp105, tmp106)
tmp108 = tmp13 & tmp2
tmp109 = tmp15 & tmp108
tmp110 = tmp13 & tmp109
tmp111 = tmp15 & tmp110
tmp112 = tl.load(in_ptr0 + (128 + x2), tmp111 & xmask, other=0.0)
tmp113 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp111 & xmask, other=0.0)
tmp114 = tmp112 + tmp113
tmp115 = tl.full(tmp114.shape, 0.0, tmp114.dtype)
tmp116 = tl.where(tmp111, tmp114, tmp115)
tmp117 = tl.load(in_ptr0 + (128 + x2), tmp110 & xmask, other=0.0)
tmp118 = tl.where(tmp15, tmp116, tmp117)
tmp119 = tl.full(tmp118.shape, 0.0, tmp118.dtype)
tmp120 = tl.where(tmp110, tmp118, tmp119)
tmp121 = tl.load(in_ptr0 + (128 + x2), tmp109 & xmask, other=0.0)
tmp122 = tl.where(tmp13, tmp120, tmp121)
tmp123 = tl.full(tmp122.shape, 0.0, tmp122.dtype)
tmp124 = tl.where(tmp109, tmp122, tmp123)
tmp125 = tmp13 & tmp108
tmp126 = tmp15 & tmp125
tmp127 = tl.load(in_ptr0 + (128 + x2), tmp126 & xmask, other=0.0)
tmp128 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp126 & xmask, other=0.0)
tmp129 = tmp127 + tmp128
tmp130 = tl.full(tmp129.shape, 0.0, tmp129.dtype)
tmp131 = tl.where(tmp126, tmp129, tmp130)
tmp132 = tl.load(in_ptr0 + (128 + x2), tmp125 & xmask, other=0.0)
tmp133 = tl.where(tmp15, tmp131, tmp132)
tmp134 = tl.full(tmp133.shape, 0.0, tmp133.dtype)
tmp135 = tl.where(tmp125, tmp133, tmp134)
tmp136 = tl.load(in_ptr0 + (128 + x2), tmp108 & xmask, other=0.0)
tmp137 = tl.where(tmp13, tmp135, tmp136)
tmp138 = tl.where(tmp15, tmp124, tmp137)
tmp139 = tl.full(tmp138.shape, 0.0, tmp138.dtype)
tmp140 = tl.where(tmp108, tmp138, tmp139)
tmp141 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp109 & xmask, other=0.0)
tmp142 = tmp121 + tmp141
tmp143 = tl.full(tmp142.shape, 0.0, tmp142.dtype)
tmp144 = tl.where(tmp109, tmp142, tmp143)
tmp145 = tl.where(tmp15, tmp144, tmp136)
tmp146 = tl.full(tmp145.shape, 0.0, tmp145.dtype)
tmp147 = tl.where(tmp108, tmp145, tmp146)
tmp148 = tl.load(in_ptr0 + (128 + x2), tmp2 & xmask, other=0.0)
tmp149 = tl.where(tmp13, tmp147, tmp148)
tmp150 = tl.where(tmp13, tmp140, tmp149)
tmp151 = tl.where(tmp8, tmp107, tmp150)
tmp152 = tl.full(tmp151.shape, 0.0, tmp151.dtype)
tmp153 = tl.where(tmp2, tmp151, tmp152)
tmp154 = tmp2 & tmp8
tmp155 = tmp13 & tmp154
tmp156 = tmp15 & tmp155
tmp157 = tmp13 & tmp156
tmp158 = tmp15 & tmp157
tmp159 = tl.load(in_ptr0 + (128 + x2), tmp158 & xmask, other=0.0)
tmp160 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp158 & xmask, other=0.0)
tmp161 = tmp159 + tmp160
tmp162 = tl.full(tmp161.shape, 0.0, tmp161.dtype)
tmp163 = tl.where(tmp158, tmp161, tmp162)
tmp164 = tl.load(in_ptr0 + (128 + x2), tmp157 & xmask, other=0.0)
tmp165 = tl.where(tmp15, tmp163, tmp164)
tmp166 = tl.full(tmp165.shape, 0.0, tmp165.dtype)
tmp167 = tl.where(tmp157, tmp165, tmp166)
tmp168 = tl.load(in_ptr0 + (128 + x2), tmp156 & xmask, other=0.0)
tmp169 = tl.where(tmp13, tmp167, tmp168)
tmp170 = tl.full(tmp169.shape, 0.0, tmp169.dtype)
tmp171 = tl.where(tmp156, tmp169, tmp170)
tmp172 = tmp13 & tmp155
tmp173 = tmp15 & tmp172
tmp174 = tl.load(in_ptr0 + (128 + x2), tmp173 & xmask, other=0.0)
tmp175 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp173 & xmask, other=0.0)
tmp176 = tmp174 + tmp175
tmp177 = tl.full(tmp176.shape, 0.0, tmp176.dtype)
tmp178 = tl.where(tmp173, tmp176, tmp177)
tmp179 = tl.load(in_ptr0 + (128 + x2), tmp172 & xmask, other=0.0)
tmp180 = tl.where(tmp15, tmp178, tmp179)
tmp181 = tl.full(tmp180.shape, 0.0, tmp180.dtype)
tmp182 = tl.where(tmp172, tmp180, tmp181)
tmp183 = tl.load(in_ptr0 + (128 + x2), tmp155 & xmask, other=0.0)
tmp184 = tl.where(tmp13, tmp182, tmp183)
tmp185 = tl.where(tmp15, tmp171, tmp184)
tmp186 = tl.full(tmp185.shape, 0.0, tmp185.dtype)
tmp187 = tl.where(tmp155, tmp185, tmp186)
tmp188 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp156 & xmask, other=0.0)
tmp189 = tmp168 + tmp188
tmp190 = tl.full(tmp189.shape, 0.0, tmp189.dtype)
tmp191 = tl.where(tmp156, tmp189, tmp190)
tmp192 = tl.where(tmp15, tmp191, tmp183)
tmp193 = tl.full(tmp192.shape, 0.0, tmp192.dtype)
tmp194 = tl.where(tmp155, tmp192, tmp193)
tmp195 = tl.load(in_ptr0 + (128 + x2), tmp154 & xmask, other=0.0)
tmp196 = tl.where(tmp13, tmp194, tmp195)
tmp197 = tl.where(tmp13, tmp187, tmp196)
tmp198 = tl.load(in_ptr1 + (128 + x0 + (4*x1)), tmp154 & xmask, other=0.0)
tmp199 = tmp197 + tmp198
tmp200 = tl.full(tmp199.shape, 0.0, tmp199.dtype)
tmp201 = tl.where(tmp154, tmp199, tmp200)
tmp202 = tmp13 & tmp8
tmp203 = tmp15 & tmp202
tmp204 = tmp13 & tmp203
tmp205 = tmp15 & tmp204
tmp206 = tl.load(in_ptr0 + (128 + x2), tmp205 & xmask, other=0.0)
tmp207 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp205 & xmask, other=0.0)
tmp208 = tmp206 + tmp207
tmp209 = tl.full(tmp208.shape, 0.0, tmp208.dtype)
tmp210 = tl.where(tmp205, tmp208, tmp209)
tmp211 = tl.load(in_ptr0 + (128 + x2), tmp204 & xmask, other=0.0)
tmp212 = tl.where(tmp15, tmp210, tmp211)
tmp213 = tl.full(tmp212.shape, 0.0, tmp212.dtype)
tmp214 = tl.where(tmp204, tmp212, tmp213)
tmp215 = tl.load(in_ptr0 + (128 + x2), tmp203 & xmask, other=0.0)
tmp216 = tl.where(tmp13, tmp214, tmp215)
tmp217 = tl.full(tmp216.shape, 0.0, tmp216.dtype)
tmp218 = tl.where(tmp203, tmp216, tmp217)
tmp219 = tmp13 & tmp202
tmp220 = tmp15 & tmp219
tmp221 = tl.load(in_ptr0 + (128 + x2), tmp220 & xmask, other=0.0)
tmp222 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp220 & xmask, other=0.0)
tmp223 = tmp221 + tmp222
tmp224 = tl.full(tmp223.shape, 0.0, tmp223.dtype)
tmp225 = tl.where(tmp220, tmp223, tmp224)
tmp226 = tl.load(in_ptr0 + (128 + x2), tmp219 & xmask, other=0.0)
tmp227 = tl.where(tmp15, tmp225, tmp226)
tmp228 = tl.full(tmp227.shape, 0.0, tmp227.dtype)
tmp229 = tl.where(tmp219, tmp227, tmp228)
tmp230 = tl.load(in_ptr0 + (128 + x2), tmp202 & xmask, other=0.0)
tmp231 = tl.where(tmp13, tmp229, tmp230)
tmp232 = tl.where(tmp15, tmp218, tmp231)
tmp233 = tl.full(tmp232.shape, 0.0, tmp232.dtype)
tmp234 = tl.where(tmp202, tmp232, tmp233)
tmp235 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp203 & xmask, other=0.0)
tmp236 = tmp215 + tmp235
tmp237 = tl.full(tmp236.shape, 0.0, tmp236.dtype)
tmp238 = tl.where(tmp203, tmp236, tmp237)
tmp239 = tl.where(tmp15, tmp238, tmp230)
tmp240 = tl.full(tmp239.shape, 0.0, tmp239.dtype)
tmp241 = tl.where(tmp202, tmp239, tmp240)
tmp242 = tl.load(in_ptr0 + (128 + x2), tmp8 & xmask, other=0.0)
tmp243 = tl.where(tmp13, tmp241, tmp242)
tmp244 = tl.where(tmp13, tmp234, tmp243)
tmp245 = tl.where(tmp2, tmp201, tmp244)
tmp246 = tl.full(tmp245.shape, 0.0, tmp245.dtype)
tmp247 = tl.where(tmp8, tmp245, tmp246)
tmp248 = tmp15 & tmp13
tmp249 = tmp13 & tmp248
tmp250 = tmp15 & tmp249
tmp251 = tl.load(in_ptr0 + (128 + x2), tmp250 & xmask, other=0.0)
tmp252 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp250 & xmask, other=0.0)
tmp253 = tmp251 + tmp252
tmp254 = tl.full(tmp253.shape, 0.0, tmp253.dtype)
tmp255 = tl.where(tmp250, tmp253, tmp254)
tmp256 = tl.load(in_ptr0 + (128 + x2), tmp249 & xmask, other=0.0)
tmp257 = tl.where(tmp15, tmp255, tmp256)
tmp258 = tl.full(tmp257.shape, 0.0, tmp257.dtype)
tmp259 = tl.where(tmp249, tmp257, tmp258)
tmp260 = tl.load(in_ptr0 + (128 + x2), tmp248 & xmask, other=0.0)
tmp261 = tl.where(tmp13, tmp259, tmp260)
tmp262 = tl.full(tmp261.shape, 0.0, tmp261.dtype)
tmp263 = tl.where(tmp248, tmp261, tmp262)
tmp264 = tmp13 & tmp13
tmp265 = tmp15 & tmp264
tmp266 = tl.load(in_ptr0 + (128 + x2), tmp265 & xmask, other=0.0)
tmp267 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp265 & xmask, other=0.0)
tmp268 = tmp266 + tmp267
tmp269 = tl.full(tmp268.shape, 0.0, tmp268.dtype)
tmp270 = tl.where(tmp265, tmp268, tmp269)
tmp271 = tl.load(in_ptr0 + (128 + x2), tmp264 & xmask, other=0.0)
tmp272 = tl.where(tmp15, tmp270, tmp271)
tmp273 = tl.full(tmp272.shape, 0.0, tmp272.dtype)
tmp274 = tl.where(tmp264, tmp272, tmp273)
tmp275 = tl.load(in_ptr0 + (128 + x2), tmp13 & xmask, other=0.0)
tmp276 = tl.where(tmp13, tmp274, tmp275)
tmp277 = tl.where(tmp15, tmp263, tmp276)
tmp278 = tl.full(tmp277.shape, 0.0, tmp277.dtype)
tmp279 = tl.where(tmp13, tmp277, tmp278)
tmp280 = tl.load(in_ptr1 + (116 + x0 + (4*x1)), tmp248 & xmask, other=0.0)
tmp281 = tmp260 + tmp280
tmp282 = tl.full(tmp281.shape, 0.0, tmp281.dtype)
tmp283 = tl.where(tmp248, tmp281, tmp282)
tmp284 = tl.where(tmp15, tmp283, tmp275)
tmp285 = tl.full(tmp284.shape, 0.0, tmp284.dtype)
tmp286 = tl.where(tmp13, tmp284, tmp285)
tmp288 = tl.where(tmp13, tmp286, tmp287)
tmp289 = tl.where(tmp13, tmp279, tmp288)
tmp290 = tl.where(tmp8, tmp247, tmp289)
tmp291 = tl.where(tmp2, tmp153, tmp290)
tl.store(out_ptr0 + (x2), tmp291, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_0/inductor_cache/py/cpytp2auwhinkwdqpacccqnydcfnih56cbanca53bbl4nghkctqs.py
# Topologically Sorted Source Nodes: [iadd_7, iadd_8, iadd_9, iadd_10], Original ATen: [aten.add]
# Source node to ATen node mapping:
# iadd_10 => add_10
# iadd_7 => add_7
# iadd_8 => add_8
# iadd_9 => add_9
# Graph fragment:
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_116, %select_15), kwargs = {})
# %slice_scatter_default_28 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_14, %add_7, 1, 12, 16), kwargs = {})
# %slice_scatter_default_29 : [num_users=5] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_27, %slice_scatter_default_28, 0, 4, 8), kwargs = {})
# %slice_scatter_default_30 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_15, %slice_119, 1, 12, 16), kwargs = {})
# %slice_scatter_default_31 : [num_users=4] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_29, %slice_scatter_default_30, 0, 4, 8), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_132, %select_17), kwargs = {})
# %slice_scatter_default_32 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_16, %add_8, 1, 0, 4), kwargs = {})
# %slice_scatter_default_33 : [num_users=5] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_31, %slice_scatter_default_32, 0, 8, 12), kwargs = {})
# %slice_scatter_default_35 : [num_users=4] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_33, %slice_scatter_default_34, 0, 8, 12), kwargs = {})
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_148, %select_19), kwargs = {})
# %slice_scatter_default_36 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_18, %add_9, 1, 4, 8), kwargs = {})
# %slice_scatter_default_37 : [num_users=5] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_35, %slice_scatter_default_36, 0, 8, 12), kwargs = {})
# %slice_scatter_default_38 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_19, %slice_151, 1, 4, 8), kwargs = {})
# %slice_scatter_default_39 : [num_users=4] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_37, %slice_scatter_default_38, 0, 8, 12), kwargs = {})
# %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_164, %select_21), kwargs = {})
# %slice_scatter_default_40 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_20, %add_10, 1, 8, 12), kwargs = {})
# %slice_scatter_default_41 : [num_users=5] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_39, %slice_scatter_default_40, 0, 8, 12), kwargs = {})
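# This kernel fuses four consecutive in-place adds (iadd_7 .. iadd_10), each a
# `+=` into a different 4-wide column window of a row block, re-scattered at
# both nesting levels roughly as the fragments above spell out.  A
# hypothetical eager-mode sketch of the repeated pattern (illustrative shapes,
# not the real graph tensors):
def _sketch_chained_slice_iadd(buf, updates):
    # buf: (16, 16) buffer; updates: iterable of (row_start, col_start, delta)
    # with delta of shape (4, 4), one tuple per fused iadd.
    for r, c, delta in updates:
        rows = buf[r:r + 4]
        cols = rows[:, c:c + 4] + delta
        rows = torch.slice_scatter(rows, cols, 1, c, c + 4)
        buf = torch.slice_scatter(buf, rows, 0, r, r + 4)
    return buf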
triton_poi_fused_add_6 = async_compile.triton('triton_poi_fused_add_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 41, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16)
x2 = xindex
x0 = xindex % 16
tmp147 = tl.load(in_out_ptr0 + (x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 8, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 12, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + ((-128) + x2), tmp5 & xmask, other=0.0)
tmp7 = x0
tmp8 = tl.full([1], 4, tl.int64)
tmp9 = tmp7 < tmp8
tmp10 = tmp9 & tmp5
tmp11 = tmp0 >= tmp8
tmp12 = tmp0 < tmp1
tmp13 = tmp11 & tmp12
tmp14 = tmp13 & tmp10
tmp15 = tmp7 >= tmp3
tmp16 = tmp15 & tmp14
tmp17 = tmp13 & tmp16
tmp18 = tmp15 & tmp17
tmp19 = tl.load(in_out_ptr0 + (x2), tmp18 & xmask, other=0.0)
tmp20 = tl.load(in_ptr1 + (84 + x0 + (4*x1)), tmp18 & xmask, other=0.0)
tmp21 = tmp19 + tmp20
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp18, tmp21, tmp22)
tmp24 = tl.load(in_out_ptr0 + (x2), tmp17 & xmask, other=0.0)
tmp25 = tl.where(tmp15, tmp23, tmp24)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp17, tmp25, tmp26)
tmp28 = tl.load(in_out_ptr0 + (x2), tmp16 & xmask, other=0.0)
tmp29 = tl.where(tmp13, tmp27, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp16, tmp29, tmp30)
tmp32 = tmp13 & tmp14
tmp33 = tmp15 & tmp32
tmp34 = tl.load(in_out_ptr0 + (x2), tmp33 & xmask, other=0.0)
tmp35 = tl.load(in_ptr1 + (84 + x0 + (4*x1)), tmp33 & xmask, other=0.0)
tmp36 = tmp34 + tmp35
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp33, tmp36, tmp37)
tmp39 = tl.load(in_out_ptr0 + (x2), tmp32 & xmask, other=0.0)
tmp40 = tl.where(tmp15, tmp38, tmp39)
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp32, tmp40, tmp41)
tmp43 = tl.load(in_out_ptr0 + (x2), tmp14 & xmask, other=0.0)
tmp44 = tl.where(tmp13, tmp42, tmp43)
tmp45 = tl.where(tmp15, tmp31, tmp44)
tmp46 = tl.full(tmp45.shape, 0.0, tmp45.dtype)
tmp47 = tl.where(tmp14, tmp45, tmp46)
tmp48 = tl.load(in_ptr1 + (84 + x0 + (4*x1)), tmp16 & xmask, other=0.0)
tmp49 = tmp28 + tmp48
tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
tmp51 = tl.where(tmp16, tmp49, tmp50)
tmp52 = tl.where(tmp15, tmp51, tmp43)
tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype)
tmp54 = tl.where(tmp14, tmp52, tmp53)
tmp55 = tl.load(in_out_ptr0 + (x2), tmp10 & xmask, other=0.0)
tmp56 = tl.where(tmp13, tmp54, tmp55)
tmp57 = tl.where(tmp13, tmp47, tmp56)
tmp58 = tl.load(in_ptr1 + (96 + x0 + (4*x1)), tmp10 & xmask, other=0.0)
tmp59 = tmp57 + tmp58
tmp60 = tl.full(tmp59.shape, 0.0, tmp59.dtype)
tmp61 = tl.where(tmp10, tmp59, tmp60)
tmp62 = tmp13 & tmp5
tmp63 = tmp15 & tmp62
tmp64 = tmp13 & tmp63
tmp65 = tmp15 & tmp64
tmp66 = tl.load(in_out_ptr0 + (x2), tmp65 & xmask, other=0.0)
tmp67 = tl.load(in_ptr1 + (84 + x0 + (4*x1)), tmp65 & xmask, other=0.0)
tmp68 = tmp66 + tmp67
tmp69 = tl.full(tmp68.shape, 0.0, tmp68.dtype)
tmp70 = tl.where(tmp65, tmp68, tmp69)
tmp71 = tl.load(in_out_ptr0 + (x2), tmp64 & xmask, other=0.0)
tmp72 = tl.where(tmp15, tmp70, tmp71)
tmp73 = tl.full(tmp72.shape, 0.0, tmp72.dtype)
tmp74 = tl.where(tmp64, tmp72, tmp73)
tmp75 = tl.load(in_out_ptr0 + (x2), tmp63 & xmask, other=0.0)
tmp76 = tl.where(tmp13, tmp74, tmp75)
tmp77 = tl.full(tmp76.shape, 0.0, tmp76.dtype)
tmp78 = tl.where(tmp63, tmp76, tmp77)
tmp79 = tmp13 & tmp62
tmp80 = tmp15 & tmp79
tmp81 = tl.load(in_out_ptr0 + (x2), tmp80 & xmask, other=0.0)
tmp82 = tl.load(in_ptr1 + (84 + x0 + (4*x1)), tmp80 & xmask, other=0.0)
tmp83 = tmp81 + tmp82
tmp84 = tl.full(tmp83.shape, 0.0, tmp83.dtype)
tmp85 = tl.where(tmp80, tmp83, tmp84)
tmp86 = tl.load(in_out_ptr0 + (x2), tmp79 & xmask, other=0.0)
tmp87 = tl.where(tmp15, tmp85, tmp86)
tmp88 = tl.full(tmp87.shape, 0.0, tmp87.dtype)
tmp89 = tl.where(tmp79, tmp87, tmp88)
tmp90 = tl.load(in_out_ptr0 + (x2), tmp62 & xmask, other=0.0)
tmp91 = tl.where(tmp13, tmp89, tmp90)
tmp92 = tl.where(tmp15, tmp78, tmp91)
tmp93 = tl.full(tmp92.shape, 0.0, tmp92.dtype)
tmp94 = tl.where(tmp62, tmp92, tmp93)
tmp95 = tl.load(in_ptr1 + (84 + x0 + (4*x1)), tmp63 & xmask, other=0.0)
tmp96 = tmp75 + tmp95
tmp97 = tl.full(tmp96.shape, 0.0, tmp96.dtype)
tmp98 = tl.where(tmp63, tmp96, tmp97)
tmp99 = tl.where(tmp15, tmp98, tmp90)
tmp100 = tl.full(tmp99.shape, 0.0, tmp99.dtype)
tmp101 = tl.where(tmp62, tmp99, tmp100)
tmp102 = tl.load(in_out_ptr0 + (x2), tmp5 & xmask, other=0.0)
tmp103 = tl.where(tmp13, tmp101, tmp102)
tmp104 = tl.where(tmp13, tmp94, tmp103)
tmp105 = tl.where(tmp9, tmp61, tmp104)
tmp106 = tl.full(tmp105.shape, 0.0, tmp105.dtype)
tmp107 = tl.where(tmp5, tmp105, tmp106)
tmp108 = tmp15 & tmp13
tmp109 = tmp13 & tmp108
tmp110 = tmp15 & tmp109
tmp111 = tl.load(in_out_ptr0 + (x2), tmp110 & xmask, other=0.0)
tmp112 = tl.load(in_ptr1 + (84 + x0 + (4*x1)), tmp110 & xmask, other=0.0)
tmp113 = tmp111 + tmp112
tmp114 = tl.full(tmp113.shape, 0.0, tmp113.dtype)
tmp115 = tl.where(tmp110, tmp113, tmp114)
tmp116 = tl.load(in_out_ptr0 + (x2), tmp109 & xmask, other=0.0)
tmp117 = tl.where(tmp15, tmp115, tmp116)
tmp118 = tl.full(tmp117.shape, 0.0, tmp117.dtype)
tmp119 = tl.where(tmp109, tmp117, tmp118)
tmp120 = tl.load(in_out_ptr0 + (x2), tmp108 & xmask, other=0.0)
tmp121 = tl.where(tmp13, tmp119, tmp120)
tmp122 = tl.full(tmp121.shape, 0.0, tmp121.dtype)
tmp123 = tl.where(tmp108, tmp121, tmp122)
tmp124 = tmp13 & tmp13
tmp125 = tmp15 & tmp124
tmp126 = tl.load(in_out_ptr0 + (x2), tmp125 & xmask, other=0.0)
tmp127 = tl.load(in_ptr1 + (84 + x0 + (4*x1)), tmp125 & xmask, other=0.0)
tmp128 = tmp126 + tmp127
tmp129 = tl.full(tmp128.shape, 0.0, tmp128.dtype)
tmp130 = tl.where(tmp125, tmp128, tmp129)
tmp131 = tl.load(in_out_ptr0 + (x2), tmp124 & xmask, other=0.0)
tmp132 = tl.where(tmp15, tmp130, tmp131)
tmp133 = tl.full(tmp132.shape, 0.0, tmp132.dtype)
tmp134 = tl.where(tmp124, tmp132, tmp133)
tmp135 = tl.load(in_out_ptr0 + (x2), tmp13 & xmask, other=0.0)
tmp136 = tl.where(tmp13, tmp134, tmp135)
tmp137 = tl.where(tmp15, tmp123, tmp136)
tmp138 = tl.full(tmp137.shape, 0.0, tmp137.dtype)
tmp139 = tl.where(tmp13, tmp137, tmp138)
tmp140 = tl.load(in_ptr1 + (84 + x0 + (4*x1)), tmp108 & xmask, other=0.0)
tmp141 = tmp120 + tmp140
tmp142 = tl.full(tmp141.shape, 0.0, tmp141.dtype)
tmp143 = tl.where(tmp108, tmp141, tmp142)
tmp144 = tl.where(tmp15, tmp143, tmp135)
tmp145 = tl.full(tmp144.shape, 0.0, tmp144.dtype)
tmp146 = tl.where(tmp13, tmp144, tmp145)
tmp148 = tl.where(tmp13, tmp146, tmp147)
tmp149 = tl.where(tmp13, tmp139, tmp148)
tmp150 = tl.where(tmp5, tmp107, tmp149)
tmp151 = tl.where(tmp5, tmp6, tmp150)
tmp152 = tmp7 >= tmp1
tmp153 = tmp7 < tmp3
tmp154 = tmp152 & tmp153
tmp155 = tmp154 & tmp5
tmp156 = tmp5 & tmp155
tmp157 = tmp7 >= tmp8
tmp158 = tmp7 < tmp1
tmp159 = tmp157 & tmp158
tmp160 = tmp159 & tmp156
tmp161 = tmp5 & tmp160
tmp162 = tmp159 & tmp161
tmp163 = tl.load(in_ptr1 + (108 + x0 + (4*x1)), tmp162 & xmask, other=0.0)
tmp164 = tmp151 + tmp163
tmp165 = tl.full(tmp164.shape, 0.0, tmp164.dtype)
tmp166 = tl.where(tmp162, tmp164, tmp165)
tmp167 = tl.where(tmp159, tmp166, tmp151)
tmp168 = tl.full(tmp167.shape, 0.0, tmp167.dtype)
tmp169 = tl.where(tmp161, tmp167, tmp168)
tmp170 = tl.where(tmp5, tmp169, tmp151)
tmp171 = tl.full(tmp170.shape, 0.0, tmp170.dtype)
tmp172 = tl.where(tmp160, tmp170, tmp171)
tmp173 = tmp5 & tmp156
tmp174 = tmp159 & tmp173
tmp175 = tl.load(in_ptr1 + (108 + x0 + (4*x1)), tmp174 & xmask, other=0.0)
tmp176 = tmp151 + tmp175
tmp177 = tl.full(tmp176.shape, 0.0, tmp176.dtype)
tmp178 = tl.where(tmp174, tmp176, tmp177)
tmp179 = tl.where(tmp159, tmp178, tmp151)
tmp180 = tl.full(tmp179.shape, 0.0, tmp179.dtype)
tmp181 = tl.where(tmp173, tmp179, tmp180)
tmp182 = tl.where(tmp5, tmp181, tmp151)
tmp183 = tl.where(tmp159, tmp172, tmp182)
tmp184 = tl.full(tmp183.shape, 0.0, tmp183.dtype)
tmp185 = tl.where(tmp156, tmp183, tmp184)
tmp186 = tl.load(in_ptr1 + (108 + x0 + (4*x1)), tmp160 & xmask, other=0.0)
tmp187 = tmp151 + tmp186
tmp188 = tl.full(tmp187.shape, 0.0, tmp187.dtype)
tmp189 = tl.where(tmp160, tmp187, tmp188)
tmp190 = tl.where(tmp159, tmp189, tmp151)
tmp191 = tl.full(tmp190.shape, 0.0, tmp190.dtype)
tmp192 = tl.where(tmp156, tmp190, tmp191)
tmp193 = tl.where(tmp5, tmp192, tmp151)
tmp194 = tl.where(tmp5, tmp185, tmp193)
tmp195 = tl.load(in_ptr1 + (120 + x0 + (4*x1)), tmp155 & xmask, other=0.0)
tmp196 = tmp194 + tmp195
tmp197 = tl.full(tmp196.shape, 0.0, tmp196.dtype)
tmp198 = tl.where(tmp155, tmp196, tmp197)
tmp199 = tmp5 & tmp5
tmp200 = tmp159 & tmp199
tmp201 = tmp5 & tmp200
tmp202 = tmp159 & tmp201
tmp203 = tl.load(in_ptr1 + (108 + x0 + (4*x1)), tmp202 & xmask, other=0.0)
tmp204 = tmp151 + tmp203
tmp205 = tl.full(tmp204.shape, 0.0, tmp204.dtype)
tmp206 = tl.where(tmp202, tmp204, tmp205)
tmp207 = tl.where(tmp159, tmp206, tmp151)
tmp208 = tl.full(tmp207.shape, 0.0, tmp207.dtype)
tmp209 = tl.where(tmp201, tmp207, tmp208)
tmp210 = tl.where(tmp5, tmp209, tmp151)
tmp211 = tl.full(tmp210.shape, 0.0, tmp210.dtype)
tmp212 = tl.where(tmp200, tmp210, tmp211)
tmp213 = tmp5 & tmp199
tmp214 = tmp159 & tmp213
tmp215 = tl.load(in_ptr1 + (108 + x0 + (4*x1)), tmp214 & xmask, other=0.0)
tmp216 = tmp151 + tmp215
tmp217 = tl.full(tmp216.shape, 0.0, tmp216.dtype)
tmp218 = tl.where(tmp214, tmp216, tmp217)
tmp219 = tl.where(tmp159, tmp218, tmp151)
tmp220 = tl.full(tmp219.shape, 0.0, tmp219.dtype)
tmp221 = tl.where(tmp213, tmp219, tmp220)
tmp222 = tl.where(tmp5, tmp221, tmp151)
tmp223 = tl.where(tmp159, tmp212, tmp222)
tmp224 = tl.full(tmp223.shape, 0.0, tmp223.dtype)
tmp225 = tl.where(tmp199, tmp223, tmp224)
tmp226 = tl.load(in_ptr1 + (108 + x0 + (4*x1)), tmp200 & xmask, other=0.0)
tmp227 = tmp151 + tmp226
tmp228 = tl.full(tmp227.shape, 0.0, tmp227.dtype)
tmp229 = tl.where(tmp200, tmp227, tmp228)
tmp230 = tl.where(tmp159, tmp229, tmp151)
tmp231 = tl.full(tmp230.shape, 0.0, tmp230.dtype)
tmp232 = tl.where(tmp199, tmp230, tmp231)
tmp233 = tl.where(tmp5, tmp232, tmp151)
tmp234 = tl.where(tmp5, tmp225, tmp233)
tmp235 = tl.where(tmp154, tmp198, tmp234)
tmp236 = tl.full(tmp235.shape, 0.0, tmp235.dtype)
tmp237 = tl.where(tmp5, tmp235, tmp236)
tmp238 = tmp159 & tmp5
tmp239 = tmp5 & tmp238
tmp240 = tmp159 & tmp239
tmp241 = tl.load(in_ptr1 + (108 + x0 + (4*x1)), tmp240 & xmask, other=0.0)
tmp242 = tmp151 + tmp241
tmp243 = tl.full(tmp242.shape, 0.0, tmp242.dtype)
tmp244 = tl.where(tmp240, tmp242, tmp243)
tmp245 = tl.where(tmp159, tmp244, tmp151)
tmp246 = tl.full(tmp245.shape, 0.0, tmp245.dtype)
tmp247 = tl.where(tmp239, tmp245, tmp246)
tmp248 = tl.where(tmp5, tmp247, tmp151)
tmp249 = tl.full(tmp248.shape, 0.0, tmp248.dtype)
tmp250 = tl.where(tmp238, tmp248, tmp249)
tmp251 = tl.where(tmp159, tmp250, tmp233)
tmp252 = tl.full(tmp251.shape, 0.0, tmp251.dtype)
tmp253 = tl.where(tmp5, tmp251, tmp252)
tmp254 = tl.load(in_ptr1 + (108 + x0 + (4*x1)), tmp238 & xmask, other=0.0)
tmp255 = tmp151 + tmp254
tmp256 = tl.full(tmp255.shape, 0.0, tmp255.dtype)
tmp257 = tl.where(tmp238, tmp255, tmp256)
tmp258 = tl.where(tmp159, tmp257, tmp151)
tmp259 = tl.full(tmp258.shape, 0.0, tmp258.dtype)
tmp260 = tl.where(tmp5, tmp258, tmp259)
tmp261 = tl.where(tmp5, tmp260, tmp151)
tmp262 = tl.where(tmp5, tmp253, tmp261)
tmp263 = tl.where(tmp5, tmp237, tmp262)
tl.store(in_out_ptr0 + (x2), tmp263, xmask)
''', device_str='cuda')
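# Readability note on the generated kernel above (explanatory only, never called by
# this module): the long tmpNN chains emulate torch.slice_scatter with predicated
# loads (tl.load(..., mask, other=0.0)) followed by tl.where selects, so each lane
# either keeps the existing buffer value or takes the freshly added one. A minimal
# hand-written sketch of that idiom, with hypothetical names and a made-up slice mask:
@triton.jit
def _masked_slice_add_sketch(ptr, add_ptr, n, XBLOCK: tl.constexpr):
    idx = tl.program_id(0) * XBLOCK + tl.arange(0, XBLOCK)
    inbounds = idx < n
    region = (idx % 16) < 4                      # "only this column slice is updated"
    base = tl.load(ptr + idx, inbounds, other=0.0)
    add = tl.load(add_ptr + idx, inbounds & region, other=0.0)
    out = tl.where(region, base + add, base)     # updated lanes vs. untouched lanes
    tl.store(ptr + idx, out, inbounds)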
# kernel path: runs/run_shard_0/inductor_cache/ln/clnpe6as37knvaycmymqf2jkrvvvemfcqdlqzyl3bzkkywcxfyfp.py
# Topologically Sorted Source Nodes: [iadd_12], Original ATen: [aten.add]
# Source node to ATen node mapping:
# iadd_12 => add_12
# Graph fragment:
# %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_196, %select_25), kwargs = {})
# %slice_scatter_default_48 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_24, %add_12, 1, 0, 4), kwargs = {})
# %slice_scatter_default_50 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_25, %slice_199, 1, 0, 4), kwargs = {})
triton_poi_fused_add_7 = async_compile.triton('triton_poi_fused_add_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 43, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_7(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp198 = tl.load(in_ptr0 + (192 + x2), xmask)
tmp0 = x0
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = 12 + x1
tmp4 = tl.full([1], 8, tl.int64)
tmp5 = tmp3 >= tmp4
tmp6 = tl.full([1], 12, tl.int64)
tmp7 = tmp3 < tmp6
tmp8 = tmp5 & tmp7
tmp9 = tmp8 & tmp2
tmp10 = tmp0 >= tmp6
tmp11 = tmp10 & tmp9
tmp12 = tmp8 & tmp11
tmp13 = tmp10 & tmp12
tmp14 = tmp8 & tmp13
tmp15 = tmp0 >= tmp4
tmp16 = tmp0 < tmp6
tmp17 = tmp15 & tmp16
tmp18 = tmp17 & tmp14
tmp19 = tl.load(in_ptr0 + (192 + x2), tmp18 & xmask, other=0.0)
tmp20 = tl.load(in_ptr0 + (192 + x2), tmp14 & xmask, other=0.0)
tmp21 = tl.where(tmp17, tmp19, tmp20)
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp14, tmp21, tmp22)
tmp24 = tl.load(in_ptr0 + (192 + x2), tmp13 & xmask, other=0.0)
tmp25 = tl.where(tmp8, tmp23, tmp24)
tmp26 = tl.load(in_ptr1 + (180 + x0 + (4*x1)), tmp13 & xmask, other=0.0)
tmp27 = tmp25 + tmp26
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp13, tmp27, tmp28)
tmp30 = tmp8 & tmp12
tmp31 = tmp17 & tmp30
tmp32 = tl.load(in_ptr0 + (192 + x2), tmp31 & xmask, other=0.0)
tmp33 = tl.load(in_ptr0 + (192 + x2), tmp30 & xmask, other=0.0)
tmp34 = tl.where(tmp17, tmp32, tmp33)
tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype)
tmp36 = tl.where(tmp30, tmp34, tmp35)
tmp37 = tl.load(in_ptr0 + (192 + x2), tmp12 & xmask, other=0.0)
tmp38 = tl.where(tmp8, tmp36, tmp37)
tmp39 = tl.where(tmp10, tmp29, tmp38)
tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
tmp41 = tl.where(tmp12, tmp39, tmp40)
tmp42 = tmp17 & tmp12
tmp43 = tl.load(in_ptr0 + (192 + x2), tmp42 & xmask, other=0.0)
tmp44 = tl.where(tmp17, tmp43, tmp37)
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp12, tmp44, tmp45)
tmp47 = tl.load(in_ptr0 + (192 + x2), tmp11 & xmask, other=0.0)
tmp48 = tl.where(tmp8, tmp46, tmp47)
tmp49 = tl.where(tmp8, tmp41, tmp48)
tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
tmp51 = tl.where(tmp11, tmp49, tmp50)
tmp52 = tmp8 & tmp9
tmp53 = tmp10 & tmp52
tmp54 = tmp8 & tmp53
tmp55 = tmp17 & tmp54
tmp56 = tl.load(in_ptr0 + (192 + x2), tmp55 & xmask, other=0.0)
tmp57 = tl.load(in_ptr0 + (192 + x2), tmp54 & xmask, other=0.0)
tmp58 = tl.where(tmp17, tmp56, tmp57)
tmp59 = tl.full(tmp58.shape, 0.0, tmp58.dtype)
tmp60 = tl.where(tmp54, tmp58, tmp59)
tmp61 = tl.load(in_ptr0 + (192 + x2), tmp53 & xmask, other=0.0)
tmp62 = tl.where(tmp8, tmp60, tmp61)
tmp63 = tl.load(in_ptr1 + (180 + x0 + (4*x1)), tmp53 & xmask, other=0.0)
tmp64 = tmp62 + tmp63
tmp65 = tl.full(tmp64.shape, 0.0, tmp64.dtype)
tmp66 = tl.where(tmp53, tmp64, tmp65)
tmp67 = tmp8 & tmp52
tmp68 = tmp17 & tmp67
tmp69 = tl.load(in_ptr0 + (192 + x2), tmp68 & xmask, other=0.0)
tmp70 = tl.load(in_ptr0 + (192 + x2), tmp67 & xmask, other=0.0)
tmp71 = tl.where(tmp17, tmp69, tmp70)
tmp72 = tl.full(tmp71.shape, 0.0, tmp71.dtype)
tmp73 = tl.where(tmp67, tmp71, tmp72)
tmp74 = tl.load(in_ptr0 + (192 + x2), tmp52 & xmask, other=0.0)
tmp75 = tl.where(tmp8, tmp73, tmp74)
tmp76 = tl.where(tmp10, tmp66, tmp75)
tmp77 = tl.full(tmp76.shape, 0.0, tmp76.dtype)
tmp78 = tl.where(tmp52, tmp76, tmp77)
tmp79 = tmp17 & tmp52
tmp80 = tl.load(in_ptr0 + (192 + x2), tmp79 & xmask, other=0.0)
tmp81 = tl.where(tmp17, tmp80, tmp74)
tmp82 = tl.full(tmp81.shape, 0.0, tmp81.dtype)
tmp83 = tl.where(tmp52, tmp81, tmp82)
tmp84 = tl.load(in_ptr0 + (192 + x2), tmp9 & xmask, other=0.0)
tmp85 = tl.where(tmp8, tmp83, tmp84)
tmp86 = tl.where(tmp8, tmp78, tmp85)
tmp87 = tl.where(tmp10, tmp51, tmp86)
tmp88 = tl.full(tmp87.shape, 0.0, tmp87.dtype)
tmp89 = tl.where(tmp9, tmp87, tmp88)
tmp90 = tl.load(in_ptr1 + (180 + x0 + (4*x1)), tmp11 & xmask, other=0.0)
tmp91 = tmp48 + tmp90
tmp92 = tl.full(tmp91.shape, 0.0, tmp91.dtype)
tmp93 = tl.where(tmp11, tmp91, tmp92)
tmp94 = tl.where(tmp10, tmp93, tmp85)
tmp95 = tl.full(tmp94.shape, 0.0, tmp94.dtype)
tmp96 = tl.where(tmp9, tmp94, tmp95)
tmp97 = tmp17 & tmp9
tmp98 = tl.load(in_ptr0 + (192 + x2), tmp97 & xmask, other=0.0)
tmp99 = tl.where(tmp17, tmp98, tmp84)
tmp100 = tl.full(tmp99.shape, 0.0, tmp99.dtype)
tmp101 = tl.where(tmp9, tmp99, tmp100)
tmp102 = tl.load(in_ptr0 + (192 + x2), tmp2 & xmask, other=0.0)
tmp103 = tl.where(tmp8, tmp101, tmp102)
tmp104 = tl.where(tmp8, tmp96, tmp103)
tmp105 = tl.where(tmp8, tmp89, tmp104)
tmp106 = tl.load(in_ptr1 + (192 + x0 + (4*x1)), tmp2 & xmask, other=0.0)
tmp107 = tmp105 + tmp106
tmp108 = tl.full(tmp107.shape, 0.0, tmp107.dtype)
tmp109 = tl.where(tmp2, tmp107, tmp108)
tmp110 = tmp10 & tmp8
tmp111 = tmp8 & tmp110
tmp112 = tmp10 & tmp111
tmp113 = tmp8 & tmp112
tmp114 = tmp17 & tmp113
tmp115 = tl.load(in_ptr0 + (192 + x2), tmp114 & xmask, other=0.0)
tmp116 = tl.load(in_ptr0 + (192 + x2), tmp113 & xmask, other=0.0)
tmp117 = tl.where(tmp17, tmp115, tmp116)
tmp118 = tl.full(tmp117.shape, 0.0, tmp117.dtype)
tmp119 = tl.where(tmp113, tmp117, tmp118)
tmp120 = tl.load(in_ptr0 + (192 + x2), tmp112 & xmask, other=0.0)
tmp121 = tl.where(tmp8, tmp119, tmp120)
tmp122 = tl.load(in_ptr1 + (180 + x0 + (4*x1)), tmp112 & xmask, other=0.0)
tmp123 = tmp121 + tmp122
tmp124 = tl.full(tmp123.shape, 0.0, tmp123.dtype)
tmp125 = tl.where(tmp112, tmp123, tmp124)
tmp126 = tmp8 & tmp111
tmp127 = tmp17 & tmp126
tmp128 = tl.load(in_ptr0 + (192 + x2), tmp127 & xmask, other=0.0)
tmp129 = tl.load(in_ptr0 + (192 + x2), tmp126 & xmask, other=0.0)
tmp130 = tl.where(tmp17, tmp128, tmp129)
tmp131 = tl.full(tmp130.shape, 0.0, tmp130.dtype)
tmp132 = tl.where(tmp126, tmp130, tmp131)
tmp133 = tl.load(in_ptr0 + (192 + x2), tmp111 & xmask, other=0.0)
tmp134 = tl.where(tmp8, tmp132, tmp133)
tmp135 = tl.where(tmp10, tmp125, tmp134)
tmp136 = tl.full(tmp135.shape, 0.0, tmp135.dtype)
tmp137 = tl.where(tmp111, tmp135, tmp136)
tmp138 = tmp17 & tmp111
tmp139 = tl.load(in_ptr0 + (192 + x2), tmp138 & xmask, other=0.0)
tmp140 = tl.where(tmp17, tmp139, tmp133)
tmp141 = tl.full(tmp140.shape, 0.0, tmp140.dtype)
tmp142 = tl.where(tmp111, tmp140, tmp141)
tmp143 = tl.load(in_ptr0 + (192 + x2), tmp110 & xmask, other=0.0)
tmp144 = tl.where(tmp8, tmp142, tmp143)
tmp145 = tl.where(tmp8, tmp137, tmp144)
tmp146 = tl.full(tmp145.shape, 0.0, tmp145.dtype)
tmp147 = tl.where(tmp110, tmp145, tmp146)
tmp148 = tmp8 & tmp8
tmp149 = tmp10 & tmp148
tmp150 = tmp8 & tmp149
tmp151 = tmp17 & tmp150
tmp152 = tl.load(in_ptr0 + (192 + x2), tmp151 & xmask, other=0.0)
tmp153 = tl.load(in_ptr0 + (192 + x2), tmp150 & xmask, other=0.0)
tmp154 = tl.where(tmp17, tmp152, tmp153)
tmp155 = tl.full(tmp154.shape, 0.0, tmp154.dtype)
tmp156 = tl.where(tmp150, tmp154, tmp155)
tmp157 = tl.load(in_ptr0 + (192 + x2), tmp149 & xmask, other=0.0)
tmp158 = tl.where(tmp8, tmp156, tmp157)
tmp159 = tl.load(in_ptr1 + (180 + x0 + (4*x1)), tmp149 & xmask, other=0.0)
tmp160 = tmp158 + tmp159
tmp161 = tl.full(tmp160.shape, 0.0, tmp160.dtype)
tmp162 = tl.where(tmp149, tmp160, tmp161)
tmp163 = tmp8 & tmp148
tmp164 = tmp17 & tmp163
tmp165 = tl.load(in_ptr0 + (192 + x2), tmp164 & xmask, other=0.0)
tmp166 = tl.load(in_ptr0 + (192 + x2), tmp163 & xmask, other=0.0)
tmp167 = tl.where(tmp17, tmp165, tmp166)
tmp168 = tl.full(tmp167.shape, 0.0, tmp167.dtype)
tmp169 = tl.where(tmp163, tmp167, tmp168)
tmp170 = tl.load(in_ptr0 + (192 + x2), tmp148 & xmask, other=0.0)
tmp171 = tl.where(tmp8, tmp169, tmp170)
tmp172 = tl.where(tmp10, tmp162, tmp171)
tmp173 = tl.full(tmp172.shape, 0.0, tmp172.dtype)
tmp174 = tl.where(tmp148, tmp172, tmp173)
tmp175 = tmp17 & tmp148
tmp176 = tl.load(in_ptr0 + (192 + x2), tmp175 & xmask, other=0.0)
tmp177 = tl.where(tmp17, tmp176, tmp170)
tmp178 = tl.full(tmp177.shape, 0.0, tmp177.dtype)
tmp179 = tl.where(tmp148, tmp177, tmp178)
tmp180 = tl.load(in_ptr0 + (192 + x2), tmp8 & xmask, other=0.0)
tmp181 = tl.where(tmp8, tmp179, tmp180)
tmp182 = tl.where(tmp8, tmp174, tmp181)
tmp183 = tl.where(tmp10, tmp147, tmp182)
tmp184 = tl.full(tmp183.shape, 0.0, tmp183.dtype)
tmp185 = tl.where(tmp8, tmp183, tmp184)
tmp186 = tl.load(in_ptr1 + (180 + x0 + (4*x1)), tmp110 & xmask, other=0.0)
tmp187 = tmp144 + tmp186
tmp188 = tl.full(tmp187.shape, 0.0, tmp187.dtype)
tmp189 = tl.where(tmp110, tmp187, tmp188)
tmp190 = tl.where(tmp10, tmp189, tmp181)
tmp191 = tl.full(tmp190.shape, 0.0, tmp190.dtype)
tmp192 = tl.where(tmp8, tmp190, tmp191)
tmp193 = tmp17 & tmp8
tmp194 = tl.load(in_ptr0 + (192 + x2), tmp193 & xmask, other=0.0)
tmp195 = tl.where(tmp17, tmp194, tmp180)
tmp196 = tl.full(tmp195.shape, 0.0, tmp195.dtype)
tmp197 = tl.where(tmp8, tmp195, tmp196)
tmp199 = tl.where(tmp8, tmp197, tmp198)
tmp200 = tl.where(tmp8, tmp192, tmp199)
tmp201 = tl.where(tmp8, tmp185, tmp200)
tmp202 = tl.where(tmp2, tmp109, tmp201)
tmp203 = tmp3 >= tmp6
tmp204 = tmp203 & tmp2
tmp205 = tl.where(tmp203, tmp202, tmp105)
tmp206 = tl.full(tmp205.shape, 0.0, tmp205.dtype)
tmp207 = tl.where(tmp2, tmp205, tmp206)
tmp208 = tl.where(tmp203, tmp202, tmp201)
tmp209 = tl.where(tmp2, tmp207, tmp208)
tl.store(out_ptr0 + (x2), tmp202, xmask)
tl.store(out_ptr1 + (x2), tmp209, xmask)
''', device_str='cuda')
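# Explanatory sketch (not invoked by call() below): the %slice_scatter_default_*
# nodes in the fragment above are the functional form of writing into a slice;
# torch.slice_scatter(base, v, dim, start, end) builds "base with base[:, start:end]
# replaced by v" without mutating base. The "iadd_12" update therefore corresponds
# roughly to the eager code below; the 2-D shape and the 0:4 column range follow the
# fragment, but the exact row block is left out of this simplified illustration.
def _slice_scatter_iadd_sketch(base, src):
    updated = base[:, 0:4] + src[:, 0:4]                         # add into the slice
    return torch.slice_scatter(base, updated, dim=1, start=0, end=4)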
# kernel path: runs/run_shard_0/inductor_cache/ig/ciglefpf6tbo4ytp64nt6lhcnsuascdfr6hplbutd6npiqpgt2mh.py
# Topologically Sorted Source Nodes: [iadd_11], Original ATen: [aten.add]
# Source node to ATen node mapping:
# iadd_11 => add_11
# Graph fragment:
# %slice_scatter_default_42 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_21, %slice_167, 1, 8, 12), kwargs = {})
# %slice_scatter_default_43 : [num_users=4] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_41, %slice_scatter_default_42, 0, 8, 12), kwargs = {})
# %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_180, %select_23), kwargs = {})
# %slice_scatter_default_44 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_22, %add_11, 1, 12, 16), kwargs = {})
# %slice_scatter_default_45 : [num_users=5] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_43, %slice_scatter_default_44, 0, 8, 12), kwargs = {})
# %slice_scatter_default_46 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_23, %slice_183, 1, 12, 16), kwargs = {})
# %slice_scatter_default_47 : [num_users=4] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_45, %slice_scatter_default_46, 0, 8, 12), kwargs = {})
# %slice_scatter_default_49 : [num_users=5] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_47, %slice_scatter_default_48, 0, 12, 16), kwargs = {})
# %slice_scatter_default_51 : [num_users=4] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_49, %slice_scatter_default_50, 0, 12, 16), kwargs = {})
triton_poi_fused_add_8 = async_compile.triton('triton_poi_fused_add_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 23, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16)
x2 = xindex
x0 = xindex % 16
tmp102 = tl.load(in_out_ptr0 + (x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 12, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.load(in_ptr0 + ((-192) + x2), tmp2 & xmask, other=0.0)
tmp4 = tl.load(in_ptr1 + ((-192) + x2), tmp2 & xmask, other=0.0)
tmp5 = tl.full([1], 8, tl.int64)
tmp6 = tmp0 >= tmp5
tmp7 = tmp0 < tmp1
tmp8 = tmp6 & tmp7
tmp9 = x0
tmp10 = tmp9 >= tmp1
tmp11 = tmp10 & tmp8
tmp12 = tmp8 & tmp11
tmp13 = tmp10 & tmp12
tmp14 = tmp8 & tmp13
tmp15 = tmp9 >= tmp5
tmp16 = tmp9 < tmp1
tmp17 = tmp15 & tmp16
tmp18 = tmp17 & tmp14
tmp19 = tl.load(in_out_ptr0 + (x2), tmp18 & xmask, other=0.0)
tmp20 = tl.load(in_out_ptr0 + (x2), tmp14 & xmask, other=0.0)
tmp21 = tl.where(tmp17, tmp19, tmp20)
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp14, tmp21, tmp22)
tmp24 = tl.load(in_out_ptr0 + (x2), tmp13 & xmask, other=0.0)
tmp25 = tl.where(tmp8, tmp23, tmp24)
tmp26 = tl.load(in_ptr2 + (132 + x0 + (4*x1)), tmp13 & xmask, other=0.0)
tmp27 = tmp25 + tmp26
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp13, tmp27, tmp28)
tmp30 = tmp8 & tmp12
tmp31 = tmp17 & tmp30
tmp32 = tl.load(in_out_ptr0 + (x2), tmp31 & xmask, other=0.0)
tmp33 = tl.load(in_out_ptr0 + (x2), tmp30 & xmask, other=0.0)
tmp34 = tl.where(tmp17, tmp32, tmp33)
tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype)
tmp36 = tl.where(tmp30, tmp34, tmp35)
tmp37 = tl.load(in_out_ptr0 + (x2), tmp12 & xmask, other=0.0)
tmp38 = tl.where(tmp8, tmp36, tmp37)
tmp39 = tl.where(tmp10, tmp29, tmp38)
tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
tmp41 = tl.where(tmp12, tmp39, tmp40)
tmp42 = tmp17 & tmp12
tmp43 = tl.load(in_out_ptr0 + (x2), tmp42 & xmask, other=0.0)
tmp44 = tl.where(tmp17, tmp43, tmp37)
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp12, tmp44, tmp45)
tmp47 = tl.load(in_out_ptr0 + (x2), tmp11 & xmask, other=0.0)
tmp48 = tl.where(tmp8, tmp46, tmp47)
tmp49 = tl.where(tmp8, tmp41, tmp48)
tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
tmp51 = tl.where(tmp11, tmp49, tmp50)
tmp52 = tmp8 & tmp8
tmp53 = tmp10 & tmp52
tmp54 = tmp8 & tmp53
tmp55 = tmp17 & tmp54
tmp56 = tl.load(in_out_ptr0 + (x2), tmp55 & xmask, other=0.0)
tmp57 = tl.load(in_out_ptr0 + (x2), tmp54 & xmask, other=0.0)
tmp58 = tl.where(tmp17, tmp56, tmp57)
tmp59 = tl.full(tmp58.shape, 0.0, tmp58.dtype)
tmp60 = tl.where(tmp54, tmp58, tmp59)
tmp61 = tl.load(in_out_ptr0 + (x2), tmp53 & xmask, other=0.0)
tmp62 = tl.where(tmp8, tmp60, tmp61)
tmp63 = tl.load(in_ptr2 + (132 + x0 + (4*x1)), tmp53 & xmask, other=0.0)
tmp64 = tmp62 + tmp63
tmp65 = tl.full(tmp64.shape, 0.0, tmp64.dtype)
tmp66 = tl.where(tmp53, tmp64, tmp65)
tmp67 = tmp8 & tmp52
tmp68 = tmp17 & tmp67
tmp69 = tl.load(in_out_ptr0 + (x2), tmp68 & xmask, other=0.0)
tmp70 = tl.load(in_out_ptr0 + (x2), tmp67 & xmask, other=0.0)
tmp71 = tl.where(tmp17, tmp69, tmp70)
tmp72 = tl.full(tmp71.shape, 0.0, tmp71.dtype)
tmp73 = tl.where(tmp67, tmp71, tmp72)
tmp74 = tl.load(in_out_ptr0 + (x2), tmp52 & xmask, other=0.0)
tmp75 = tl.where(tmp8, tmp73, tmp74)
tmp76 = tl.where(tmp10, tmp66, tmp75)
tmp77 = tl.full(tmp76.shape, 0.0, tmp76.dtype)
tmp78 = tl.where(tmp52, tmp76, tmp77)
tmp79 = tmp17 & tmp52
tmp80 = tl.load(in_out_ptr0 + (x2), tmp79 & xmask, other=0.0)
tmp81 = tl.where(tmp17, tmp80, tmp74)
tmp82 = tl.full(tmp81.shape, 0.0, tmp81.dtype)
tmp83 = tl.where(tmp52, tmp81, tmp82)
tmp84 = tl.load(in_out_ptr0 + (x2), tmp8 & xmask, other=0.0)
tmp85 = tl.where(tmp8, tmp83, tmp84)
tmp86 = tl.where(tmp8, tmp78, tmp85)
tmp87 = tl.where(tmp10, tmp51, tmp86)
tmp88 = tl.full(tmp87.shape, 0.0, tmp87.dtype)
tmp89 = tl.where(tmp8, tmp87, tmp88)
tmp90 = tl.load(in_ptr2 + (132 + x0 + (4*x1)), tmp11 & xmask, other=0.0)
tmp91 = tmp48 + tmp90
tmp92 = tl.full(tmp91.shape, 0.0, tmp91.dtype)
tmp93 = tl.where(tmp11, tmp91, tmp92)
tmp94 = tl.where(tmp10, tmp93, tmp85)
tmp95 = tl.full(tmp94.shape, 0.0, tmp94.dtype)
tmp96 = tl.where(tmp8, tmp94, tmp95)
tmp97 = tmp17 & tmp8
tmp98 = tl.load(in_out_ptr0 + (x2), tmp97 & xmask, other=0.0)
tmp99 = tl.where(tmp17, tmp98, tmp84)
tmp100 = tl.full(tmp99.shape, 0.0, tmp99.dtype)
tmp101 = tl.where(tmp8, tmp99, tmp100)
tmp103 = tl.where(tmp8, tmp101, tmp102)
tmp104 = tl.where(tmp8, tmp96, tmp103)
tmp105 = tl.where(tmp8, tmp89, tmp104)
tmp106 = tl.where(tmp2, tmp4, tmp105)
tmp107 = tl.where(tmp2, tmp3, tmp106)
tl.store(in_out_ptr0 + (x2), tmp107, xmask)
''', device_str='cuda')
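# Readability note (hypothetical example, not part of the generated module): kernels
# whose inductor_meta lists mutated_arg_names=['in_out_ptr0'], like the one above,
# read and write the same buffer, consistent with the iadd_* source nodes: the
# original module updates slices of a single accumulator in place. Roughly:
def _inplace_accumulate_sketch(acc, update):
    acc[8:12, 12:16] += update[8:12, 12:16]      # row/column ranges illustrative only
    return acc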
# kernel path: runs/run_shard_0/inductor_cache/vr/cvregea6kqmjw7j5p7y5ofutcpo5akj3f4aziqv5ffirai74wdrq.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %slice_scatter_default_58 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_29, %slice_231, 1, 8, 12), kwargs = {})
triton_poi_fused_9 = async_compile.triton('triton_poi_fused_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 54, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_9(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp259 = tl.load(in_ptr0 + (192 + x2), xmask)
tmp0 = x0
tmp1 = tl.full([1], 8, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 12, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = 12 + x1
tmp7 = tmp6 >= tmp3
tmp8 = tmp7 & tmp5
tmp9 = tmp5 & tmp8
tmp10 = tmp7 & tmp9
tmp11 = tl.full([1], 4, tl.int64)
tmp12 = tmp0 >= tmp11
tmp13 = tmp0 < tmp1
tmp14 = tmp12 & tmp13
tmp15 = tmp14 & tmp10
tmp16 = tmp7 & tmp15
tmp17 = tmp14 & tmp16
tmp18 = tl.load(in_ptr0 + (192 + x2), tmp17 & xmask, other=0.0)
tmp19 = tl.load(in_ptr1 + (204 + x0 + (4*x1)), tmp17 & xmask, other=0.0)
tmp20 = tmp18 + tmp19
tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype)
tmp22 = tl.where(tmp17, tmp20, tmp21)
tmp23 = tl.load(in_ptr0 + (192 + x2), tmp16 & xmask, other=0.0)
tmp24 = tl.where(tmp14, tmp22, tmp23)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp16, tmp24, tmp25)
tmp27 = tl.load(in_ptr0 + (192 + x2), tmp15 & xmask, other=0.0)
tmp28 = tl.where(tmp7, tmp26, tmp27)
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp15, tmp28, tmp29)
tmp31 = tmp7 & tmp10
tmp32 = tmp14 & tmp31
tmp33 = tl.load(in_ptr0 + (192 + x2), tmp32 & xmask, other=0.0)
tmp34 = tl.load(in_ptr1 + (204 + x0 + (4*x1)), tmp32 & xmask, other=0.0)
tmp35 = tmp33 + tmp34
tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype)
tmp37 = tl.where(tmp32, tmp35, tmp36)
tmp38 = tl.load(in_ptr0 + (192 + x2), tmp31 & xmask, other=0.0)
tmp39 = tl.where(tmp14, tmp37, tmp38)
tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
tmp41 = tl.where(tmp31, tmp39, tmp40)
tmp42 = tl.load(in_ptr0 + (192 + x2), tmp10 & xmask, other=0.0)
tmp43 = tl.where(tmp7, tmp41, tmp42)
tmp44 = tl.where(tmp14, tmp30, tmp43)
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp10, tmp44, tmp45)
tmp47 = tl.load(in_ptr1 + (204 + x0 + (4*x1)), tmp15 & xmask, other=0.0)
tmp48 = tmp27 + tmp47
tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype)
tmp50 = tl.where(tmp15, tmp48, tmp49)
tmp51 = tl.where(tmp14, tmp50, tmp42)
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp10, tmp51, tmp52)
tmp54 = tl.load(in_ptr0 + (192 + x2), tmp9 & xmask, other=0.0)
tmp55 = tl.where(tmp7, tmp53, tmp54)
tmp56 = tl.where(tmp7, tmp46, tmp55)
tmp57 = tl.load(in_ptr1 + (216 + x0 + (4*x1)), tmp9 & xmask, other=0.0)
tmp58 = tmp56 + tmp57
tmp59 = tl.full(tmp58.shape, 0.0, tmp58.dtype)
tmp60 = tl.where(tmp9, tmp58, tmp59)
tmp61 = tmp7 & tmp8
tmp62 = tmp14 & tmp61
tmp63 = tmp7 & tmp62
tmp64 = tmp14 & tmp63
tmp65 = tl.load(in_ptr0 + (192 + x2), tmp64 & xmask, other=0.0)
tmp66 = tl.load(in_ptr1 + (204 + x0 + (4*x1)), tmp64 & xmask, other=0.0)
tmp67 = tmp65 + tmp66
tmp68 = tl.full(tmp67.shape, 0.0, tmp67.dtype)
tmp69 = tl.where(tmp64, tmp67, tmp68)
tmp70 = tl.load(in_ptr0 + (192 + x2), tmp63 & xmask, other=0.0)
tmp71 = tl.where(tmp14, tmp69, tmp70)
tmp72 = tl.full(tmp71.shape, 0.0, tmp71.dtype)
tmp73 = tl.where(tmp63, tmp71, tmp72)
tmp74 = tl.load(in_ptr0 + (192 + x2), tmp62 & xmask, other=0.0)
tmp75 = tl.where(tmp7, tmp73, tmp74)
tmp76 = tl.full(tmp75.shape, 0.0, tmp75.dtype)
tmp77 = tl.where(tmp62, tmp75, tmp76)
tmp78 = tmp7 & tmp61
tmp79 = tmp14 & tmp78
tmp80 = tl.load(in_ptr0 + (192 + x2), tmp79 & xmask, other=0.0)
tmp81 = tl.load(in_ptr1 + (204 + x0 + (4*x1)), tmp79 & xmask, other=0.0)
tmp82 = tmp80 + tmp81
tmp83 = tl.full(tmp82.shape, 0.0, tmp82.dtype)
tmp84 = tl.where(tmp79, tmp82, tmp83)
tmp85 = tl.load(in_ptr0 + (192 + x2), tmp78 & xmask, other=0.0)
tmp86 = tl.where(tmp14, tmp84, tmp85)
tmp87 = tl.full(tmp86.shape, 0.0, tmp86.dtype)
tmp88 = tl.where(tmp78, tmp86, tmp87)
tmp89 = tl.load(in_ptr0 + (192 + x2), tmp61 & xmask, other=0.0)
tmp90 = tl.where(tmp7, tmp88, tmp89)
tmp91 = tl.where(tmp14, tmp77, tmp90)
tmp92 = tl.full(tmp91.shape, 0.0, tmp91.dtype)
tmp93 = tl.where(tmp61, tmp91, tmp92)
tmp94 = tl.load(in_ptr1 + (204 + x0 + (4*x1)), tmp62 & xmask, other=0.0)
tmp95 = tmp74 + tmp94
tmp96 = tl.full(tmp95.shape, 0.0, tmp95.dtype)
tmp97 = tl.where(tmp62, tmp95, tmp96)
tmp98 = tl.where(tmp14, tmp97, tmp89)
tmp99 = tl.full(tmp98.shape, 0.0, tmp98.dtype)
tmp100 = tl.where(tmp61, tmp98, tmp99)
tmp101 = tl.load(in_ptr0 + (192 + x2), tmp8 & xmask, other=0.0)
tmp102 = tl.where(tmp7, tmp100, tmp101)
tmp103 = tl.where(tmp7, tmp93, tmp102)
tmp104 = tl.where(tmp5, tmp60, tmp103)
tmp105 = tl.full(tmp104.shape, 0.0, tmp104.dtype)
tmp106 = tl.where(tmp8, tmp104, tmp105)
tmp107 = tmp14 & tmp8
tmp108 = tmp7 & tmp107
tmp109 = tmp14 & tmp108
tmp110 = tl.load(in_ptr0 + (192 + x2), tmp109 & xmask, other=0.0)
tmp111 = tl.load(in_ptr1 + (204 + x0 + (4*x1)), tmp109 & xmask, other=0.0)
tmp112 = tmp110 + tmp111
tmp113 = tl.full(tmp112.shape, 0.0, tmp112.dtype)
tmp114 = tl.where(tmp109, tmp112, tmp113)
tmp115 = tl.load(in_ptr0 + (192 + x2), tmp108 & xmask, other=0.0)
tmp116 = tl.where(tmp14, tmp114, tmp115)
tmp117 = tl.full(tmp116.shape, 0.0, tmp116.dtype)
tmp118 = tl.where(tmp108, tmp116, tmp117)
tmp119 = tl.load(in_ptr0 + (192 + x2), tmp107 & xmask, other=0.0)
tmp120 = tl.where(tmp7, tmp118, tmp119)
tmp121 = tl.full(tmp120.shape, 0.0, tmp120.dtype)
tmp122 = tl.where(tmp107, tmp120, tmp121)
tmp123 = tl.where(tmp14, tmp122, tmp102)
tmp124 = tl.full(tmp123.shape, 0.0, tmp123.dtype)
tmp125 = tl.where(tmp8, tmp123, tmp124)
tmp126 = tl.load(in_ptr1 + (204 + x0 + (4*x1)), tmp107 & xmask, other=0.0)
tmp127 = tmp119 + tmp126
tmp128 = tl.full(tmp127.shape, 0.0, tmp127.dtype)
tmp129 = tl.where(tmp107, tmp127, tmp128)
tmp130 = tl.where(tmp14, tmp129, tmp101)
tmp131 = tl.full(tmp130.shape, 0.0, tmp130.dtype)
tmp132 = tl.where(tmp8, tmp130, tmp131)
tmp133 = tl.load(in_ptr0 + (192 + x2), tmp5 & xmask, other=0.0)
tmp134 = tl.where(tmp7, tmp132, tmp133)
tmp135 = tl.where(tmp7, tmp125, tmp134)
tmp136 = tl.where(tmp7, tmp106, tmp135)
tmp137 = tl.full(tmp136.shape, 0.0, tmp136.dtype)
tmp138 = tl.where(tmp5, tmp136, tmp137)
tmp139 = tmp5 & tmp7
tmp140 = tmp7 & tmp139
tmp141 = tmp14 & tmp140
tmp142 = tmp7 & tmp141
tmp143 = tmp14 & tmp142
tmp144 = tl.load(in_ptr0 + (192 + x2), tmp143 & xmask, other=0.0)
tmp145 = tl.load(in_ptr1 + (204 + x0 + (4*x1)), tmp143 & xmask, other=0.0)
tmp146 = tmp144 + tmp145
tmp147 = tl.full(tmp146.shape, 0.0, tmp146.dtype)
tmp148 = tl.where(tmp143, tmp146, tmp147)
tmp149 = tl.load(in_ptr0 + (192 + x2), tmp142 & xmask, other=0.0)
tmp150 = tl.where(tmp14, tmp148, tmp149)
tmp151 = tl.full(tmp150.shape, 0.0, tmp150.dtype)
tmp152 = tl.where(tmp142, tmp150, tmp151)
tmp153 = tl.load(in_ptr0 + (192 + x2), tmp141 & xmask, other=0.0)
tmp154 = tl.where(tmp7, tmp152, tmp153)
tmp155 = tl.full(tmp154.shape, 0.0, tmp154.dtype)
tmp156 = tl.where(tmp141, tmp154, tmp155)
tmp157 = tmp7 & tmp140
tmp158 = tmp14 & tmp157
tmp159 = tl.load(in_ptr0 + (192 + x2), tmp158 & xmask, other=0.0)
tmp160 = tl.load(in_ptr1 + (204 + x0 + (4*x1)), tmp158 & xmask, other=0.0)
tmp161 = tmp159 + tmp160
tmp162 = tl.full(tmp161.shape, 0.0, tmp161.dtype)
tmp163 = tl.where(tmp158, tmp161, tmp162)
tmp164 = tl.load(in_ptr0 + (192 + x2), tmp157 & xmask, other=0.0)
tmp165 = tl.where(tmp14, tmp163, tmp164)
tmp166 = tl.full(tmp165.shape, 0.0, tmp165.dtype)
tmp167 = tl.where(tmp157, tmp165, tmp166)
tmp168 = tl.load(in_ptr0 + (192 + x2), tmp140 & xmask, other=0.0)
tmp169 = tl.where(tmp7, tmp167, tmp168)
tmp170 = tl.where(tmp14, tmp156, tmp169)
tmp171 = tl.full(tmp170.shape, 0.0, tmp170.dtype)
tmp172 = tl.where(tmp140, tmp170, tmp171)
tmp173 = tl.load(in_ptr1 + (204 + x0 + (4*x1)), tmp141 & xmask, other=0.0)
tmp174 = tmp153 + tmp173
tmp175 = tl.full(tmp174.shape, 0.0, tmp174.dtype)
tmp176 = tl.where(tmp141, tmp174, tmp175)
tmp177 = tl.where(tmp14, tmp176, tmp168)
tmp178 = tl.full(tmp177.shape, 0.0, tmp177.dtype)
tmp179 = tl.where(tmp140, tmp177, tmp178)
tmp180 = tl.load(in_ptr0 + (192 + x2), tmp139 & xmask, other=0.0)
tmp181 = tl.where(tmp7, tmp179, tmp180)
tmp182 = tl.where(tmp7, tmp172, tmp181)
tmp183 = tl.load(in_ptr1 + (216 + x0 + (4*x1)), tmp139 & xmask, other=0.0)
tmp184 = tmp182 + tmp183
tmp185 = tl.full(tmp184.shape, 0.0, tmp184.dtype)
tmp186 = tl.where(tmp139, tmp184, tmp185)
tmp187 = tmp7 & tmp7
tmp188 = tmp14 & tmp187
tmp189 = tmp7 & tmp188
tmp190 = tmp14 & tmp189
tmp191 = tl.load(in_ptr0 + (192 + x2), tmp190 & xmask, other=0.0)
tmp192 = tl.load(in_ptr1 + (204 + x0 + (4*x1)), tmp190 & xmask, other=0.0)
tmp193 = tmp191 + tmp192
tmp194 = tl.full(tmp193.shape, 0.0, tmp193.dtype)
tmp195 = tl.where(tmp190, tmp193, tmp194)
tmp196 = tl.load(in_ptr0 + (192 + x2), tmp189 & xmask, other=0.0)
tmp197 = tl.where(tmp14, tmp195, tmp196)
tmp198 = tl.full(tmp197.shape, 0.0, tmp197.dtype)
tmp199 = tl.where(tmp189, tmp197, tmp198)
tmp200 = tl.load(in_ptr0 + (192 + x2), tmp188 & xmask, other=0.0)
tmp201 = tl.where(tmp7, tmp199, tmp200)
tmp202 = tl.full(tmp201.shape, 0.0, tmp201.dtype)
tmp203 = tl.where(tmp188, tmp201, tmp202)
tmp204 = tmp7 & tmp187
tmp205 = tmp14 & tmp204
tmp206 = tl.load(in_ptr0 + (192 + x2), tmp205 & xmask, other=0.0)
tmp207 = tl.load(in_ptr1 + (204 + x0 + (4*x1)), tmp205 & xmask, other=0.0)
tmp208 = tmp206 + tmp207
tmp209 = tl.full(tmp208.shape, 0.0, tmp208.dtype)
tmp210 = tl.where(tmp205, tmp208, tmp209)
tmp211 = tl.load(in_ptr0 + (192 + x2), tmp204 & xmask, other=0.0)
tmp212 = tl.where(tmp14, tmp210, tmp211)
tmp213 = tl.full(tmp212.shape, 0.0, tmp212.dtype)
tmp214 = tl.where(tmp204, tmp212, tmp213)
tmp215 = tl.load(in_ptr0 + (192 + x2), tmp187 & xmask, other=0.0)
tmp216 = tl.where(tmp7, tmp214, tmp215)
tmp217 = tl.where(tmp14, tmp203, tmp216)
tmp218 = tl.full(tmp217.shape, 0.0, tmp217.dtype)
tmp219 = tl.where(tmp187, tmp217, tmp218)
tmp220 = tl.load(in_ptr1 + (204 + x0 + (4*x1)), tmp188 & xmask, other=0.0)
tmp221 = tmp200 + tmp220
tmp222 = tl.full(tmp221.shape, 0.0, tmp221.dtype)
tmp223 = tl.where(tmp188, tmp221, tmp222)
tmp224 = tl.where(tmp14, tmp223, tmp215)
tmp225 = tl.full(tmp224.shape, 0.0, tmp224.dtype)
tmp226 = tl.where(tmp187, tmp224, tmp225)
tmp227 = tl.load(in_ptr0 + (192 + x2), tmp7 & xmask, other=0.0)
tmp228 = tl.where(tmp7, tmp226, tmp227)
tmp229 = tl.where(tmp7, tmp219, tmp228)
tmp230 = tl.where(tmp5, tmp186, tmp229)
tmp231 = tl.full(tmp230.shape, 0.0, tmp230.dtype)
tmp232 = tl.where(tmp7, tmp230, tmp231)
tmp233 = tmp14 & tmp7
tmp234 = tmp7 & tmp233
tmp235 = tmp14 & tmp234
tmp236 = tl.load(in_ptr0 + (192 + x2), tmp235 & xmask, other=0.0)
tmp237 = tl.load(in_ptr1 + (204 + x0 + (4*x1)), tmp235 & xmask, other=0.0)
tmp238 = tmp236 + tmp237
tmp239 = tl.full(tmp238.shape, 0.0, tmp238.dtype)
tmp240 = tl.where(tmp235, tmp238, tmp239)
tmp241 = tl.load(in_ptr0 + (192 + x2), tmp234 & xmask, other=0.0)
tmp242 = tl.where(tmp14, tmp240, tmp241)
tmp243 = tl.full(tmp242.shape, 0.0, tmp242.dtype)
tmp244 = tl.where(tmp234, tmp242, tmp243)
tmp245 = tl.load(in_ptr0 + (192 + x2), tmp233 & xmask, other=0.0)
tmp246 = tl.where(tmp7, tmp244, tmp245)
tmp247 = tl.full(tmp246.shape, 0.0, tmp246.dtype)
tmp248 = tl.where(tmp233, tmp246, tmp247)
tmp249 = tl.where(tmp14, tmp248, tmp228)
tmp250 = tl.full(tmp249.shape, 0.0, tmp249.dtype)
tmp251 = tl.where(tmp7, tmp249, tmp250)
tmp252 = tl.load(in_ptr1 + (204 + x0 + (4*x1)), tmp233 & xmask, other=0.0)
tmp253 = tmp245 + tmp252
tmp254 = tl.full(tmp253.shape, 0.0, tmp253.dtype)
tmp255 = tl.where(tmp233, tmp253, tmp254)
tmp256 = tl.where(tmp14, tmp255, tmp227)
tmp257 = tl.full(tmp256.shape, 0.0, tmp256.dtype)
tmp258 = tl.where(tmp7, tmp256, tmp257)
tmp260 = tl.where(tmp7, tmp258, tmp259)
tmp261 = tl.where(tmp7, tmp251, tmp260)
tmp262 = tl.where(tmp7, tmp232, tmp261)
tmp263 = tl.where(tmp5, tmp138, tmp262)
tl.store(out_ptr0 + (x2), tmp263, xmask)
''', device_str='cuda')
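# Illustrative only: kernels with an empty "Source Nodes" list, such as the one above,
# materialize a staged copy of one row block of the accumulator (here rows 12:16,
# flat offset 192 of a (16, 16) buffer) into a scratch tensor, re-deriving the
# predicated adds inline so the next in-place kernel can read the block without
# aliasing the buffer it mutates. A rough eager-mode stand-in, ignoring the aliasing
# concern (shapes and offsets are assumptions read off the surrounding buffers):
def _stage_row_block_sketch(acc):
    return acc[12:16, :].clone()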
# kernel path: runs/run_shard_0/inductor_cache/xs/cxskzsbd5xwrh5ri3x5evkuvt5qxg5vxx6dn7dcjz2nswnkrtk3e.py
# Topologically Sorted Source Nodes: [iadd_13, iadd_14, iadd_15], Original ATen: [aten.add]
# Source node to ATen node mapping:
# iadd_13 => add_13
# iadd_14 => add_14
# iadd_15 => add_15
# Graph fragment:
# %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_212, %select_27), kwargs = {})
# %slice_scatter_default_52 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_26, %add_13, 1, 4, 8), kwargs = {})
# %slice_scatter_default_53 : [num_users=5] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_51, %slice_scatter_default_52, 0, 12, 16), kwargs = {})
# %slice_scatter_default_54 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_27, %slice_215, 1, 4, 8), kwargs = {})
# %slice_scatter_default_55 : [num_users=4] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_53, %slice_scatter_default_54, 0, 12, 16), kwargs = {})
# %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_228, %select_29), kwargs = {})
# %slice_scatter_default_56 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_28, %add_14, 1, 8, 12), kwargs = {})
# %slice_scatter_default_57 : [num_users=5] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_55, %slice_scatter_default_56, 0, 12, 16), kwargs = {})
# %slice_scatter_default_59 : [num_users=4] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_57, %slice_scatter_default_58, 0, 12, 16), kwargs = {})
# %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_244, %select_31), kwargs = {})
# %slice_scatter_default_60 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_30, %add_15, 1, 12, 16), kwargs = {})
# %slice_scatter_default_61 : [num_users=5] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_59, %slice_scatter_default_60, 0, 12, 16), kwargs = {})
# %slice_scatter_default_62 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_31, %slice_247, 1, 12, 16), kwargs = {})
# %slice_scatter_default_63 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_61, %slice_scatter_default_62, 0, 12, 16), kwargs = {})
triton_poi_fused_add_10 = async_compile.triton('triton_poi_fused_add_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 31, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16)
x2 = xindex
x0 = xindex % 16
tmp133 = tl.load(in_out_ptr0 + (x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 12, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.load(in_ptr0 + ((-192) + x2), tmp2 & xmask, other=0.0)
tmp4 = x0
tmp5 = tl.full([1], 8, tl.int64)
tmp6 = tmp4 >= tmp5
tmp7 = tmp4 < tmp1
tmp8 = tmp6 & tmp7
tmp9 = tmp8 & tmp2
tmp10 = tmp2 & tmp9
tmp11 = tl.full([1], 4, tl.int64)
tmp12 = tmp4 >= tmp11
tmp13 = tmp4 < tmp5
tmp14 = tmp12 & tmp13
tmp15 = tmp14 & tmp10
tmp16 = tmp2 & tmp15
tmp17 = tmp14 & tmp16
tmp18 = tl.load(in_out_ptr0 + (x2), tmp17 & xmask, other=0.0)
tmp19 = tl.load(in_ptr1 + (156 + x0 + (4*x1)), tmp17 & xmask, other=0.0)
tmp20 = tmp18 + tmp19
tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype)
tmp22 = tl.where(tmp17, tmp20, tmp21)
tmp23 = tl.load(in_out_ptr0 + (x2), tmp16 & xmask, other=0.0)
tmp24 = tl.where(tmp14, tmp22, tmp23)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp16, tmp24, tmp25)
tmp27 = tl.load(in_out_ptr0 + (x2), tmp15 & xmask, other=0.0)
tmp28 = tl.where(tmp2, tmp26, tmp27)
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp15, tmp28, tmp29)
tmp31 = tmp2 & tmp10
tmp32 = tmp14 & tmp31
tmp33 = tl.load(in_out_ptr0 + (x2), tmp32 & xmask, other=0.0)
tmp34 = tl.load(in_ptr1 + (156 + x0 + (4*x1)), tmp32 & xmask, other=0.0)
tmp35 = tmp33 + tmp34
tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype)
tmp37 = tl.where(tmp32, tmp35, tmp36)
tmp38 = tl.load(in_out_ptr0 + (x2), tmp31 & xmask, other=0.0)
tmp39 = tl.where(tmp14, tmp37, tmp38)
tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
tmp41 = tl.where(tmp31, tmp39, tmp40)
tmp42 = tl.load(in_out_ptr0 + (x2), tmp10 & xmask, other=0.0)
tmp43 = tl.where(tmp2, tmp41, tmp42)
tmp44 = tl.where(tmp14, tmp30, tmp43)
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp10, tmp44, tmp45)
tmp47 = tl.load(in_ptr1 + (156 + x0 + (4*x1)), tmp15 & xmask, other=0.0)
tmp48 = tmp27 + tmp47
tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype)
tmp50 = tl.where(tmp15, tmp48, tmp49)
tmp51 = tl.where(tmp14, tmp50, tmp42)
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp10, tmp51, tmp52)
tmp54 = tl.load(in_out_ptr0 + (x2), tmp9 & xmask, other=0.0)
tmp55 = tl.where(tmp2, tmp53, tmp54)
tmp56 = tl.where(tmp2, tmp46, tmp55)
tmp57 = tl.load(in_ptr1 + (168 + x0 + (4*x1)), tmp9 & xmask, other=0.0)
tmp58 = tmp56 + tmp57
tmp59 = tl.full(tmp58.shape, 0.0, tmp58.dtype)
tmp60 = tl.where(tmp9, tmp58, tmp59)
tmp61 = tmp2 & tmp2
tmp62 = tmp14 & tmp61
tmp63 = tmp2 & tmp62
tmp64 = tmp14 & tmp63
tmp65 = tl.load(in_out_ptr0 + (x2), tmp64 & xmask, other=0.0)
tmp66 = tl.load(in_ptr1 + (156 + x0 + (4*x1)), tmp64 & xmask, other=0.0)
tmp67 = tmp65 + tmp66
tmp68 = tl.full(tmp67.shape, 0.0, tmp67.dtype)
tmp69 = tl.where(tmp64, tmp67, tmp68)
tmp70 = tl.load(in_out_ptr0 + (x2), tmp63 & xmask, other=0.0)
tmp71 = tl.where(tmp14, tmp69, tmp70)
tmp72 = tl.full(tmp71.shape, 0.0, tmp71.dtype)
tmp73 = tl.where(tmp63, tmp71, tmp72)
tmp74 = tl.load(in_out_ptr0 + (x2), tmp62 & xmask, other=0.0)
tmp75 = tl.where(tmp2, tmp73, tmp74)
tmp76 = tl.full(tmp75.shape, 0.0, tmp75.dtype)
tmp77 = tl.where(tmp62, tmp75, tmp76)
tmp78 = tmp2 & tmp61
tmp79 = tmp14 & tmp78
tmp80 = tl.load(in_out_ptr0 + (x2), tmp79 & xmask, other=0.0)
tmp81 = tl.load(in_ptr1 + (156 + x0 + (4*x1)), tmp79 & xmask, other=0.0)
tmp82 = tmp80 + tmp81
tmp83 = tl.full(tmp82.shape, 0.0, tmp82.dtype)
tmp84 = tl.where(tmp79, tmp82, tmp83)
tmp85 = tl.load(in_out_ptr0 + (x2), tmp78 & xmask, other=0.0)
tmp86 = tl.where(tmp14, tmp84, tmp85)
tmp87 = tl.full(tmp86.shape, 0.0, tmp86.dtype)
tmp88 = tl.where(tmp78, tmp86, tmp87)
tmp89 = tl.load(in_out_ptr0 + (x2), tmp61 & xmask, other=0.0)
tmp90 = tl.where(tmp2, tmp88, tmp89)
tmp91 = tl.where(tmp14, tmp77, tmp90)
tmp92 = tl.full(tmp91.shape, 0.0, tmp91.dtype)
tmp93 = tl.where(tmp61, tmp91, tmp92)
tmp94 = tl.load(in_ptr1 + (156 + x0 + (4*x1)), tmp62 & xmask, other=0.0)
tmp95 = tmp74 + tmp94
tmp96 = tl.full(tmp95.shape, 0.0, tmp95.dtype)
tmp97 = tl.where(tmp62, tmp95, tmp96)
tmp98 = tl.where(tmp14, tmp97, tmp89)
tmp99 = tl.full(tmp98.shape, 0.0, tmp98.dtype)
tmp100 = tl.where(tmp61, tmp98, tmp99)
tmp101 = tl.load(in_out_ptr0 + (x2), tmp2 & xmask, other=0.0)
tmp102 = tl.where(tmp2, tmp100, tmp101)
tmp103 = tl.where(tmp2, tmp93, tmp102)
tmp104 = tl.where(tmp8, tmp60, tmp103)
tmp105 = tl.full(tmp104.shape, 0.0, tmp104.dtype)
tmp106 = tl.where(tmp2, tmp104, tmp105)
tmp107 = tmp14 & tmp2
tmp108 = tmp2 & tmp107
tmp109 = tmp14 & tmp108
tmp110 = tl.load(in_out_ptr0 + (x2), tmp109 & xmask, other=0.0)
tmp111 = tl.load(in_ptr1 + (156 + x0 + (4*x1)), tmp109 & xmask, other=0.0)
tmp112 = tmp110 + tmp111
tmp113 = tl.full(tmp112.shape, 0.0, tmp112.dtype)
tmp114 = tl.where(tmp109, tmp112, tmp113)
tmp115 = tl.load(in_out_ptr0 + (x2), tmp108 & xmask, other=0.0)
tmp116 = tl.where(tmp14, tmp114, tmp115)
tmp117 = tl.full(tmp116.shape, 0.0, tmp116.dtype)
tmp118 = tl.where(tmp108, tmp116, tmp117)
tmp119 = tl.load(in_out_ptr0 + (x2), tmp107 & xmask, other=0.0)
tmp120 = tl.where(tmp2, tmp118, tmp119)
tmp121 = tl.full(tmp120.shape, 0.0, tmp120.dtype)
tmp122 = tl.where(tmp107, tmp120, tmp121)
tmp123 = tl.where(tmp14, tmp122, tmp102)
tmp124 = tl.full(tmp123.shape, 0.0, tmp123.dtype)
tmp125 = tl.where(tmp2, tmp123, tmp124)
tmp126 = tl.load(in_ptr1 + (156 + x0 + (4*x1)), tmp107 & xmask, other=0.0)
tmp127 = tmp119 + tmp126
tmp128 = tl.full(tmp127.shape, 0.0, tmp127.dtype)
tmp129 = tl.where(tmp107, tmp127, tmp128)
tmp130 = tl.where(tmp14, tmp129, tmp101)
tmp131 = tl.full(tmp130.shape, 0.0, tmp130.dtype)
tmp132 = tl.where(tmp2, tmp130, tmp131)
tmp134 = tl.where(tmp2, tmp132, tmp133)
tmp135 = tl.where(tmp2, tmp125, tmp134)
tmp136 = tl.where(tmp2, tmp106, tmp135)
tmp137 = tl.where(tmp2, tmp3, tmp136)
tmp138 = tmp4 >= tmp1
tmp139 = tmp138 & tmp2
tmp140 = tmp2 & tmp139
tmp141 = tmp138 & tmp140
tmp142 = tl.load(in_ptr1 + (180 + x0 + (4*x1)), tmp141 & xmask, other=0.0)
tmp143 = tmp137 + tmp142
tmp144 = tl.full(tmp143.shape, 0.0, tmp143.dtype)
tmp145 = tl.where(tmp141, tmp143, tmp144)
tmp146 = tl.where(tmp138, tmp145, tmp137)
tmp147 = tl.full(tmp146.shape, 0.0, tmp146.dtype)
tmp148 = tl.where(tmp140, tmp146, tmp147)
tmp149 = tl.where(tmp2, tmp148, tmp137)
tmp150 = tl.full(tmp149.shape, 0.0, tmp149.dtype)
tmp151 = tl.where(tmp139, tmp149, tmp150)
tmp152 = tmp138 & tmp61
tmp153 = tl.load(in_ptr1 + (180 + x0 + (4*x1)), tmp152 & xmask, other=0.0)
tmp154 = tmp137 + tmp153
tmp155 = tl.full(tmp154.shape, 0.0, tmp154.dtype)
tmp156 = tl.where(tmp152, tmp154, tmp155)
tmp157 = tl.where(tmp138, tmp156, tmp137)
tmp158 = tl.full(tmp157.shape, 0.0, tmp157.dtype)
tmp159 = tl.where(tmp61, tmp157, tmp158)
tmp160 = tl.where(tmp2, tmp159, tmp137)
tmp161 = tl.where(tmp138, tmp151, tmp160)
tmp162 = tl.full(tmp161.shape, 0.0, tmp161.dtype)
tmp163 = tl.where(tmp2, tmp161, tmp162)
tmp164 = tl.load(in_ptr1 + (180 + x0 + (4*x1)), tmp139 & xmask, other=0.0)
tmp165 = tmp137 + tmp164
tmp166 = tl.full(tmp165.shape, 0.0, tmp165.dtype)
tmp167 = tl.where(tmp139, tmp165, tmp166)
tmp168 = tl.where(tmp138, tmp167, tmp137)
tmp169 = tl.full(tmp168.shape, 0.0, tmp168.dtype)
tmp170 = tl.where(tmp2, tmp168, tmp169)
tmp171 = tl.where(tmp2, tmp170, tmp137)
tmp172 = tl.where(tmp2, tmp163, tmp171)
tl.store(in_out_ptr0 + (x2), tmp172, xmask)
''', device_str='cuda')
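# Explanatory sketch (hypothetical helper, never called): iadd_13, iadd_14 and
# iadd_15 are three consecutive "+=" updates on different column slices of the same
# row block, and they fuse into the single kernel above. An eager-mode rendering,
# with the row/column ranges taken from the graph fragment and src standing in for
# the matching 2-D view of the original input:
def _fused_row_block_iadds_sketch(acc, src):
    rows = slice(12, 16)
    for cols in (slice(4, 8), slice(8, 12), slice(12, 16)):      # iadd_13 .. iadd_15
        acc[rows, cols] += src[rows, cols]
    return acc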
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [iadd_2], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(buf0, arg0_1, buf1, 64, grid=grid(64), stream=stream0)
del buf0
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [output, iadd, iadd_1, iadd_3, iadd_4], Original ATen: [aten.zeros, aten.add]
triton_poi_fused_add_zeros_2.run(buf3, buf1, arg0_1, 256, grid=grid(256), stream=stream0)
buf4 = buf1; del buf1 # reuse
buf5 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [iadd_6], Original ATen: [aten.add]
triton_poi_fused_add_3.run(buf3, arg0_1, buf4, buf5, 64, grid=grid(64), stream=stream0)
buf6 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [iadd_5], Original ATen: [aten.add]
triton_poi_fused_add_4.run(buf6, buf5, buf4, arg0_1, 256, grid=grid(256), stream=stream0)
buf7 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_5.run(buf6, arg0_1, buf7, 64, grid=grid(64), stream=stream0)
buf8 = buf6; del buf6 # reuse
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [iadd_7, iadd_8, iadd_9, iadd_10], Original ATen: [aten.add]
triton_poi_fused_add_6.run(buf9, buf7, arg0_1, 256, grid=grid(256), stream=stream0)
buf10 = buf7; del buf7 # reuse
buf11 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [iadd_12], Original ATen: [aten.add]
triton_poi_fused_add_7.run(buf9, arg0_1, buf10, buf11, 64, grid=grid(64), stream=stream0)
buf12 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [iadd_11], Original ATen: [aten.add]
triton_poi_fused_add_8.run(buf12, buf11, buf10, arg0_1, 256, grid=grid(256), stream=stream0)
del buf10
buf13 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_9.run(buf12, arg0_1, buf13, 64, grid=grid(64), stream=stream0)
buf14 = buf12; del buf12 # reuse
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [iadd_13, iadd_14, iadd_15], Original ATen: [aten.add]
triton_poi_fused_add_10.run(buf15, buf13, arg0_1, 256, grid=grid(256), stream=stream0)
del arg0_1
del buf13
return (buf15, )
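# Illustrative direct invocation (assumes a CUDA device is available;
# benchmark_compiled_module below remains the canonical harness). call() consumes
# its argument list and returns a one-tuple holding the (16, 16) float32 result:
def _example_call():
    x = torch.randn(4, 4, 4, 4, device='cuda', dtype=torch.float32)
    out, = call([x])
    return out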
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
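# Descriptive note (inferred, not taken from source): this kernel matches the
# [iadd_2] launch in the wrapper earlier in this file — one (4, 4) staging block
# (xnumel = 16) computed from the flattened (4, 4, 4, 4) input; the flat offsets
# 8, 20 and 32 below appear to select the slices touched by the original in-place adds.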
@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp264 = tl.load(in_ptr0 + (32 + x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = 8 + x0
tmp4 = tmp3 >= tmp1
tmp5 = tl.full([1], 8, tl.int64)
tmp6 = tmp3 < tmp5
tmp7 = tmp4 & tmp6
tmp8 = tmp7 & tmp2
tmp9 = tmp2 & tmp8
tmp10 = tmp7 & tmp9
tmp11 = tmp2 & tmp10
tmp12 = tmp3 < tmp1
tmp13 = tmp12 & tmp11
tmp14 = tmp2 & tmp13
tmp15 = tmp12 & tmp14
tmp16 = tl.load(in_ptr0 + (8 + x2), tmp15 & xmask, other=0.0)
tmp17 = 0.0
tmp18 = tmp17 + tmp16
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp15, tmp18, tmp19)
tmp21 = tl.where(tmp12, tmp20, tmp17)
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp14, tmp21, tmp22)
tmp24 = tl.where(tmp2, tmp23, tmp17)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp13, tmp24, tmp25)
tmp27 = tmp2 & tmp11
tmp28 = tmp12 & tmp27
tmp29 = tl.load(in_ptr0 + (8 + x2), tmp28 & xmask, other=0.0)
tmp30 = tmp17 + tmp29
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp28, tmp30, tmp31)
tmp33 = tl.where(tmp12, tmp32, tmp17)
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp27, tmp33, tmp34)
tmp36 = tl.where(tmp2, tmp35, tmp17)
tmp37 = tl.where(tmp12, tmp26, tmp36)
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp11, tmp37, tmp38)
tmp40 = tl.load(in_ptr0 + (8 + x2), tmp13 & xmask, other=0.0)
tmp41 = tmp17 + tmp40
tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
tmp43 = tl.where(tmp13, tmp41, tmp42)
tmp44 = tl.where(tmp12, tmp43, tmp17)
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp11, tmp44, tmp45)
tmp47 = tl.where(tmp2, tmp46, tmp17)
tmp48 = tl.where(tmp2, tmp39, tmp47)
tmp49 = tl.load(in_ptr0 + (20 + x2), tmp10 & xmask, other=0.0)
tmp50 = tmp48 + tmp49
tmp51 = tl.full(tmp50.shape, 0.0, tmp50.dtype)
tmp52 = tl.where(tmp10, tmp50, tmp51)
tmp53 = tmp2 & tmp9
tmp54 = tmp12 & tmp53
tmp55 = tmp2 & tmp54
tmp56 = tmp12 & tmp55
tmp57 = tl.load(in_ptr0 + (8 + x2), tmp56 & xmask, other=0.0)
tmp58 = tmp17 + tmp57
tmp59 = tl.full(tmp58.shape, 0.0, tmp58.dtype)
tmp60 = tl.where(tmp56, tmp58, tmp59)
tmp61 = tl.where(tmp12, tmp60, tmp17)
tmp62 = tl.full(tmp61.shape, 0.0, tmp61.dtype)
tmp63 = tl.where(tmp55, tmp61, tmp62)
tmp64 = tl.where(tmp2, tmp63, tmp17)
tmp65 = tl.full(tmp64.shape, 0.0, tmp64.dtype)
tmp66 = tl.where(tmp54, tmp64, tmp65)
tmp67 = tmp2 & tmp53
tmp68 = tmp12 & tmp67
tmp69 = tl.load(in_ptr0 + (8 + x2), tmp68 & xmask, other=0.0)
tmp70 = tmp17 + tmp69
tmp71 = tl.full(tmp70.shape, 0.0, tmp70.dtype)
tmp72 = tl.where(tmp68, tmp70, tmp71)
tmp73 = tl.where(tmp12, tmp72, tmp17)
tmp74 = tl.full(tmp73.shape, 0.0, tmp73.dtype)
tmp75 = tl.where(tmp67, tmp73, tmp74)
tmp76 = tl.where(tmp2, tmp75, tmp17)
tmp77 = tl.where(tmp12, tmp66, tmp76)
tmp78 = tl.full(tmp77.shape, 0.0, tmp77.dtype)
tmp79 = tl.where(tmp53, tmp77, tmp78)
tmp80 = tl.load(in_ptr0 + (8 + x2), tmp54 & xmask, other=0.0)
tmp81 = tmp17 + tmp80
tmp82 = tl.full(tmp81.shape, 0.0, tmp81.dtype)
tmp83 = tl.where(tmp54, tmp81, tmp82)
tmp84 = tl.where(tmp12, tmp83, tmp17)
tmp85 = tl.full(tmp84.shape, 0.0, tmp84.dtype)
tmp86 = tl.where(tmp53, tmp84, tmp85)
tmp87 = tl.where(tmp2, tmp86, tmp17)
tmp88 = tl.where(tmp2, tmp79, tmp87)
tmp89 = tl.where(tmp7, tmp52, tmp88)
tmp90 = tl.full(tmp89.shape, 0.0, tmp89.dtype)
tmp91 = tl.where(tmp9, tmp89, tmp90)
tmp92 = tmp12 & tmp9
tmp93 = tmp2 & tmp92
tmp94 = tmp12 & tmp93
tmp95 = tl.load(in_ptr0 + (8 + x2), tmp94 & xmask, other=0.0)
tmp96 = tmp17 + tmp95
tmp97 = tl.full(tmp96.shape, 0.0, tmp96.dtype)
tmp98 = tl.where(tmp94, tmp96, tmp97)
tmp99 = tl.where(tmp12, tmp98, tmp17)
tmp100 = tl.full(tmp99.shape, 0.0, tmp99.dtype)
tmp101 = tl.where(tmp93, tmp99, tmp100)
tmp102 = tl.where(tmp2, tmp101, tmp17)
tmp103 = tl.full(tmp102.shape, 0.0, tmp102.dtype)
tmp104 = tl.where(tmp92, tmp102, tmp103)
tmp105 = tl.where(tmp12, tmp104, tmp87)
tmp106 = tl.full(tmp105.shape, 0.0, tmp105.dtype)
tmp107 = tl.where(tmp9, tmp105, tmp106)
tmp108 = tl.load(in_ptr0 + (8 + x2), tmp92 & xmask, other=0.0)
tmp109 = tmp17 + tmp108
tmp110 = tl.full(tmp109.shape, 0.0, tmp109.dtype)
tmp111 = tl.where(tmp92, tmp109, tmp110)
tmp112 = tl.where(tmp12, tmp111, tmp17)
tmp113 = tl.full(tmp112.shape, 0.0, tmp112.dtype)
tmp114 = tl.where(tmp9, tmp112, tmp113)
tmp115 = tl.where(tmp2, tmp114, tmp17)
tmp116 = tl.where(tmp2, tmp107, tmp115)
tmp117 = tl.where(tmp2, tmp91, tmp116)
tmp118 = tl.full(tmp117.shape, 0.0, tmp117.dtype)
tmp119 = tl.where(tmp8, tmp117, tmp118)
tmp120 = tmp2 & tmp2
tmp121 = tmp7 & tmp120
tmp122 = tmp2 & tmp121
tmp123 = tmp12 & tmp122
tmp124 = tmp2 & tmp123
tmp125 = tmp12 & tmp124
tmp126 = tl.load(in_ptr0 + (8 + x2), tmp125 & xmask, other=0.0)
tmp127 = tmp17 + tmp126
tmp128 = tl.full(tmp127.shape, 0.0, tmp127.dtype)
tmp129 = tl.where(tmp125, tmp127, tmp128)
tmp130 = tl.where(tmp12, tmp129, tmp17)
tmp131 = tl.full(tmp130.shape, 0.0, tmp130.dtype)
tmp132 = tl.where(tmp124, tmp130, tmp131)
tmp133 = tl.where(tmp2, tmp132, tmp17)
tmp134 = tl.full(tmp133.shape, 0.0, tmp133.dtype)
tmp135 = tl.where(tmp123, tmp133, tmp134)
tmp136 = tmp2 & tmp122
tmp137 = tmp12 & tmp136
tmp138 = tl.load(in_ptr0 + (8 + x2), tmp137 & xmask, other=0.0)
tmp139 = tmp17 + tmp138
tmp140 = tl.full(tmp139.shape, 0.0, tmp139.dtype)
tmp141 = tl.where(tmp137, tmp139, tmp140)
tmp142 = tl.where(tmp12, tmp141, tmp17)
tmp143 = tl.full(tmp142.shape, 0.0, tmp142.dtype)
tmp144 = tl.where(tmp136, tmp142, tmp143)
tmp145 = tl.where(tmp2, tmp144, tmp17)
tmp146 = tl.where(tmp12, tmp135, tmp145)
tmp147 = tl.full(tmp146.shape, 0.0, tmp146.dtype)
tmp148 = tl.where(tmp122, tmp146, tmp147)
tmp149 = tl.load(in_ptr0 + (8 + x2), tmp123 & xmask, other=0.0)
tmp150 = tmp17 + tmp149
tmp151 = tl.full(tmp150.shape, 0.0, tmp150.dtype)
tmp152 = tl.where(tmp123, tmp150, tmp151)
tmp153 = tl.where(tmp12, tmp152, tmp17)
tmp154 = tl.full(tmp153.shape, 0.0, tmp153.dtype)
tmp155 = tl.where(tmp122, tmp153, tmp154)
tmp156 = tl.where(tmp2, tmp155, tmp17)
tmp157 = tl.where(tmp2, tmp148, tmp156)
tmp158 = tl.load(in_ptr0 + (20 + x2), tmp121 & xmask, other=0.0)
tmp159 = tmp157 + tmp158
tmp160 = tl.full(tmp159.shape, 0.0, tmp159.dtype)
tmp161 = tl.where(tmp121, tmp159, tmp160)
tmp162 = tmp2 & tmp120
tmp163 = tmp12 & tmp162
tmp164 = tmp2 & tmp163
tmp165 = tmp12 & tmp164
tmp166 = tl.load(in_ptr0 + (8 + x2), tmp165 & xmask, other=0.0)
tmp167 = tmp17 + tmp166
tmp168 = tl.full(tmp167.shape, 0.0, tmp167.dtype)
tmp169 = tl.where(tmp165, tmp167, tmp168)
tmp170 = tl.where(tmp12, tmp169, tmp17)
tmp171 = tl.full(tmp170.shape, 0.0, tmp170.dtype)
tmp172 = tl.where(tmp164, tmp170, tmp171)
tmp173 = tl.where(tmp2, tmp172, tmp17)
tmp174 = tl.full(tmp173.shape, 0.0, tmp173.dtype)
tmp175 = tl.where(tmp163, tmp173, tmp174)
tmp176 = tmp2 & tmp162
tmp177 = tmp12 & tmp176
tmp178 = tl.load(in_ptr0 + (8 + x2), tmp177 & xmask, other=0.0)
tmp179 = tmp17 + tmp178
tmp180 = tl.full(tmp179.shape, 0.0, tmp179.dtype)
tmp181 = tl.where(tmp177, tmp179, tmp180)
tmp182 = tl.where(tmp12, tmp181, tmp17)
tmp183 = tl.full(tmp182.shape, 0.0, tmp182.dtype)
tmp184 = tl.where(tmp176, tmp182, tmp183)
tmp185 = tl.where(tmp2, tmp184, tmp17)
tmp186 = tl.where(tmp12, tmp175, tmp185)
tmp187 = tl.full(tmp186.shape, 0.0, tmp186.dtype)
tmp188 = tl.where(tmp162, tmp186, tmp187)
tmp189 = tl.load(in_ptr0 + (8 + x2), tmp163 & xmask, other=0.0)
tmp190 = tmp17 + tmp189
tmp191 = tl.full(tmp190.shape, 0.0, tmp190.dtype)
tmp192 = tl.where(tmp163, tmp190, tmp191)
tmp193 = tl.where(tmp12, tmp192, tmp17)
tmp194 = tl.full(tmp193.shape, 0.0, tmp193.dtype)
tmp195 = tl.where(tmp162, tmp193, tmp194)
tmp196 = tl.where(tmp2, tmp195, tmp17)
tmp197 = tl.where(tmp2, tmp188, tmp196)
tmp198 = tl.where(tmp7, tmp161, tmp197)
tmp199 = tl.full(tmp198.shape, 0.0, tmp198.dtype)
tmp200 = tl.where(tmp120, tmp198, tmp199)
tmp201 = tmp12 & tmp120
tmp202 = tmp2 & tmp201
tmp203 = tmp12 & tmp202
tmp204 = tl.load(in_ptr0 + (8 + x2), tmp203 & xmask, other=0.0)
tmp205 = tmp17 + tmp204
tmp206 = tl.full(tmp205.shape, 0.0, tmp205.dtype)
tmp207 = tl.where(tmp203, tmp205, tmp206)
tmp208 = tl.where(tmp12, tmp207, tmp17)
tmp209 = tl.full(tmp208.shape, 0.0, tmp208.dtype)
tmp210 = tl.where(tmp202, tmp208, tmp209)
tmp211 = tl.where(tmp2, tmp210, tmp17)
tmp212 = tl.full(tmp211.shape, 0.0, tmp211.dtype)
tmp213 = tl.where(tmp201, tmp211, tmp212)
tmp214 = tl.where(tmp12, tmp213, tmp196)
tmp215 = tl.full(tmp214.shape, 0.0, tmp214.dtype)
tmp216 = tl.where(tmp120, tmp214, tmp215)
tmp217 = tl.load(in_ptr0 + (8 + x2), tmp201 & xmask, other=0.0)
tmp218 = tmp17 + tmp217
tmp219 = tl.full(tmp218.shape, 0.0, tmp218.dtype)
tmp220 = tl.where(tmp201, tmp218, tmp219)
tmp221 = tl.where(tmp12, tmp220, tmp17)
tmp222 = tl.full(tmp221.shape, 0.0, tmp221.dtype)
tmp223 = tl.where(tmp120, tmp221, tmp222)
tmp224 = tl.where(tmp2, tmp223, tmp17)
tmp225 = tl.where(tmp2, tmp216, tmp224)
tmp226 = tl.where(tmp2, tmp200, tmp225)
tmp227 = tl.where(tmp7, tmp119, tmp226)
tmp228 = tl.full(tmp227.shape, 0.0, tmp227.dtype)
tmp229 = tl.where(tmp2, tmp227, tmp228)
tmp230 = tl.load(in_ptr0 + (20 + x2), tmp8 & xmask, other=0.0)
tmp231 = tmp116 + tmp230
tmp232 = tl.full(tmp231.shape, 0.0, tmp231.dtype)
tmp233 = tl.where(tmp8, tmp231, tmp232)
tmp234 = tl.where(tmp7, tmp233, tmp225)
tmp235 = tl.full(tmp234.shape, 0.0, tmp234.dtype)
tmp236 = tl.where(tmp2, tmp234, tmp235)
tmp237 = tmp12 & tmp2
tmp238 = tmp2 & tmp237
tmp239 = tmp12 & tmp238
tmp240 = tl.load(in_ptr0 + (8 + x2), tmp239 & xmask, other=0.0)
tmp241 = tmp17 + tmp240
tmp242 = tl.full(tmp241.shape, 0.0, tmp241.dtype)
tmp243 = tl.where(tmp239, tmp241, tmp242)
tmp244 = tl.where(tmp12, tmp243, tmp17)
tmp245 = tl.full(tmp244.shape, 0.0, tmp244.dtype)
tmp246 = tl.where(tmp238, tmp244, tmp245)
tmp247 = tl.where(tmp2, tmp246, tmp17)
tmp248 = tl.full(tmp247.shape, 0.0, tmp247.dtype)
tmp249 = tl.where(tmp237, tmp247, tmp248)
tmp250 = tl.where(tmp12, tmp249, tmp224)
tmp251 = tl.full(tmp250.shape, 0.0, tmp250.dtype)
tmp252 = tl.where(tmp2, tmp250, tmp251)
tmp253 = tl.load(in_ptr0 + (8 + x2), tmp237 & xmask, other=0.0)
tmp254 = tmp17 + tmp253
tmp255 = tl.full(tmp254.shape, 0.0, tmp254.dtype)
tmp256 = tl.where(tmp237, tmp254, tmp255)
tmp257 = tl.where(tmp12, tmp256, tmp17)
tmp258 = tl.full(tmp257.shape, 0.0, tmp257.dtype)
tmp259 = tl.where(tmp2, tmp257, tmp258)
tmp260 = tl.where(tmp2, tmp259, tmp17)
tmp261 = tl.where(tmp2, tmp252, tmp260)
tmp262 = tl.where(tmp2, tmp236, tmp261)
tmp263 = tl.where(tmp2, tmp229, tmp262)
tmp265 = tmp263 + tmp264
tl.store(out_ptr0 + x2, tmp265, xmask)
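# Pointwise kernel over a 4x16 tile (xnumel = 64). The nested range checks on the
# column index x0 appear to assemble one row block of a stacked tensor: columns
# 8..11 copy a 4x4 slice of in_ptr0, while the remaining columns accumulate
# mask-gated loads of in_ptr1 (offsets x0 + 4*x1 and 12 + x0 + 4*x1) on a zero base.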
@triton.jit
def triton_poi_fused_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 8, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 12, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (-8 + x0 + 4 * x1), tmp5 & xmask, other=0.0)
tmp7 = x1
tmp8 = tl.full([1], 4, tl.int64)
tmp9 = tmp7 < tmp8
tmp10 = tmp0 >= tmp8
tmp11 = tmp0 < tmp1
tmp12 = tmp10 & tmp11
tmp13 = tmp12 & tmp9
tmp14 = tmp9 & tmp13
tmp15 = tmp12 & tmp14
tmp16 = tmp9 & tmp15
tmp17 = tmp0 < tmp8
tmp18 = tmp17 & tmp16
tmp19 = tmp9 & tmp18
tmp20 = tmp17 & tmp19
tmp21 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp20 & xmask, other=0.0)
tmp22 = 0.0
tmp23 = tmp22 + tmp21
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp20, tmp23, tmp24)
tmp26 = tl.where(tmp17, tmp25, tmp22)
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp19, tmp26, tmp27)
tmp29 = tl.where(tmp9, tmp28, tmp22)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp18, tmp29, tmp30)
tmp32 = tmp9 & tmp16
tmp33 = tmp17 & tmp32
tmp34 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp33 & xmask, other=0.0)
tmp35 = tmp22 + tmp34
tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype)
tmp37 = tl.where(tmp33, tmp35, tmp36)
tmp38 = tl.where(tmp17, tmp37, tmp22)
tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
tmp40 = tl.where(tmp32, tmp38, tmp39)
tmp41 = tl.where(tmp9, tmp40, tmp22)
tmp42 = tl.where(tmp17, tmp31, tmp41)
tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype)
tmp44 = tl.where(tmp16, tmp42, tmp43)
tmp45 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp18 & xmask, other=0.0)
tmp46 = tmp22 + tmp45
tmp47 = tl.full(tmp46.shape, 0.0, tmp46.dtype)
tmp48 = tl.where(tmp18, tmp46, tmp47)
tmp49 = tl.where(tmp17, tmp48, tmp22)
tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
tmp51 = tl.where(tmp16, tmp49, tmp50)
tmp52 = tl.where(tmp9, tmp51, tmp22)
tmp53 = tl.where(tmp9, tmp44, tmp52)
tmp54 = tl.load(in_ptr1 + (12 + x0 + 4 * x1), tmp15 & xmask, other=0.0)
tmp55 = tmp53 + tmp54
tmp56 = tl.full(tmp55.shape, 0.0, tmp55.dtype)
tmp57 = tl.where(tmp15, tmp55, tmp56)
tmp58 = tmp9 & tmp14
tmp59 = tmp17 & tmp58
tmp60 = tmp9 & tmp59
tmp61 = tmp17 & tmp60
tmp62 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp61 & xmask, other=0.0)
tmp63 = tmp22 + tmp62
tmp64 = tl.full(tmp63.shape, 0.0, tmp63.dtype)
tmp65 = tl.where(tmp61, tmp63, tmp64)
tmp66 = tl.where(tmp17, tmp65, tmp22)
tmp67 = tl.full(tmp66.shape, 0.0, tmp66.dtype)
tmp68 = tl.where(tmp60, tmp66, tmp67)
tmp69 = tl.where(tmp9, tmp68, tmp22)
tmp70 = tl.full(tmp69.shape, 0.0, tmp69.dtype)
tmp71 = tl.where(tmp59, tmp69, tmp70)
tmp72 = tmp9 & tmp58
tmp73 = tmp17 & tmp72
tmp74 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp73 & xmask, other=0.0)
tmp75 = tmp22 + tmp74
tmp76 = tl.full(tmp75.shape, 0.0, tmp75.dtype)
tmp77 = tl.where(tmp73, tmp75, tmp76)
tmp78 = tl.where(tmp17, tmp77, tmp22)
tmp79 = tl.full(tmp78.shape, 0.0, tmp78.dtype)
tmp80 = tl.where(tmp72, tmp78, tmp79)
tmp81 = tl.where(tmp9, tmp80, tmp22)
tmp82 = tl.where(tmp17, tmp71, tmp81)
tmp83 = tl.full(tmp82.shape, 0.0, tmp82.dtype)
tmp84 = tl.where(tmp58, tmp82, tmp83)
tmp85 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp59 & xmask, other=0.0)
tmp86 = tmp22 + tmp85
tmp87 = tl.full(tmp86.shape, 0.0, tmp86.dtype)
tmp88 = tl.where(tmp59, tmp86, tmp87)
tmp89 = tl.where(tmp17, tmp88, tmp22)
tmp90 = tl.full(tmp89.shape, 0.0, tmp89.dtype)
tmp91 = tl.where(tmp58, tmp89, tmp90)
tmp92 = tl.where(tmp9, tmp91, tmp22)
tmp93 = tl.where(tmp9, tmp84, tmp92)
tmp94 = tl.where(tmp12, tmp57, tmp93)
tmp95 = tl.full(tmp94.shape, 0.0, tmp94.dtype)
tmp96 = tl.where(tmp14, tmp94, tmp95)
tmp97 = tmp17 & tmp14
tmp98 = tmp9 & tmp97
tmp99 = tmp17 & tmp98
tmp100 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp99 & xmask, other=0.0)
tmp101 = tmp22 + tmp100
tmp102 = tl.full(tmp101.shape, 0.0, tmp101.dtype)
tmp103 = tl.where(tmp99, tmp101, tmp102)
tmp104 = tl.where(tmp17, tmp103, tmp22)
tmp105 = tl.full(tmp104.shape, 0.0, tmp104.dtype)
tmp106 = tl.where(tmp98, tmp104, tmp105)
tmp107 = tl.where(tmp9, tmp106, tmp22)
tmp108 = tl.full(tmp107.shape, 0.0, tmp107.dtype)
tmp109 = tl.where(tmp97, tmp107, tmp108)
tmp110 = tl.where(tmp17, tmp109, tmp92)
tmp111 = tl.full(tmp110.shape, 0.0, tmp110.dtype)
tmp112 = tl.where(tmp14, tmp110, tmp111)
tmp113 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp97 & xmask, other=0.0)
tmp114 = tmp22 + tmp113
tmp115 = tl.full(tmp114.shape, 0.0, tmp114.dtype)
tmp116 = tl.where(tmp97, tmp114, tmp115)
tmp117 = tl.where(tmp17, tmp116, tmp22)
tmp118 = tl.full(tmp117.shape, 0.0, tmp117.dtype)
tmp119 = tl.where(tmp14, tmp117, tmp118)
tmp120 = tl.where(tmp9, tmp119, tmp22)
tmp121 = tl.where(tmp9, tmp112, tmp120)
tmp122 = tl.where(tmp9, tmp96, tmp121)
tmp123 = tl.full(tmp122.shape, 0.0, tmp122.dtype)
tmp124 = tl.where(tmp13, tmp122, tmp123)
tmp125 = tmp9 & tmp9
tmp126 = tmp12 & tmp125
tmp127 = tmp9 & tmp126
tmp128 = tmp17 & tmp127
tmp129 = tmp9 & tmp128
tmp130 = tmp17 & tmp129
tmp131 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp130 & xmask, other=0.0)
tmp132 = tmp22 + tmp131
tmp133 = tl.full(tmp132.shape, 0.0, tmp132.dtype)
tmp134 = tl.where(tmp130, tmp132, tmp133)
tmp135 = tl.where(tmp17, tmp134, tmp22)
tmp136 = tl.full(tmp135.shape, 0.0, tmp135.dtype)
tmp137 = tl.where(tmp129, tmp135, tmp136)
tmp138 = tl.where(tmp9, tmp137, tmp22)
tmp139 = tl.full(tmp138.shape, 0.0, tmp138.dtype)
tmp140 = tl.where(tmp128, tmp138, tmp139)
tmp141 = tmp9 & tmp127
tmp142 = tmp17 & tmp141
tmp143 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp142 & xmask, other=0.0)
tmp144 = tmp22 + tmp143
tmp145 = tl.full(tmp144.shape, 0.0, tmp144.dtype)
tmp146 = tl.where(tmp142, tmp144, tmp145)
tmp147 = tl.where(tmp17, tmp146, tmp22)
tmp148 = tl.full(tmp147.shape, 0.0, tmp147.dtype)
tmp149 = tl.where(tmp141, tmp147, tmp148)
tmp150 = tl.where(tmp9, tmp149, tmp22)
tmp151 = tl.where(tmp17, tmp140, tmp150)
tmp152 = tl.full(tmp151.shape, 0.0, tmp151.dtype)
tmp153 = tl.where(tmp127, tmp151, tmp152)
tmp154 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp128 & xmask, other=0.0)
tmp155 = tmp22 + tmp154
tmp156 = tl.full(tmp155.shape, 0.0, tmp155.dtype)
tmp157 = tl.where(tmp128, tmp155, tmp156)
tmp158 = tl.where(tmp17, tmp157, tmp22)
tmp159 = tl.full(tmp158.shape, 0.0, tmp158.dtype)
tmp160 = tl.where(tmp127, tmp158, tmp159)
tmp161 = tl.where(tmp9, tmp160, tmp22)
tmp162 = tl.where(tmp9, tmp153, tmp161)
tmp163 = tl.load(in_ptr1 + (12 + x0 + 4 * x1), tmp126 & xmask, other=0.0)
tmp164 = tmp162 + tmp163
tmp165 = tl.full(tmp164.shape, 0.0, tmp164.dtype)
tmp166 = tl.where(tmp126, tmp164, tmp165)
tmp167 = tmp9 & tmp125
tmp168 = tmp17 & tmp167
tmp169 = tmp9 & tmp168
tmp170 = tmp17 & tmp169
tmp171 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp170 & xmask, other=0.0)
tmp172 = tmp22 + tmp171
tmp173 = tl.full(tmp172.shape, 0.0, tmp172.dtype)
tmp174 = tl.where(tmp170, tmp172, tmp173)
tmp175 = tl.where(tmp17, tmp174, tmp22)
tmp176 = tl.full(tmp175.shape, 0.0, tmp175.dtype)
tmp177 = tl.where(tmp169, tmp175, tmp176)
tmp178 = tl.where(tmp9, tmp177, tmp22)
tmp179 = tl.full(tmp178.shape, 0.0, tmp178.dtype)
tmp180 = tl.where(tmp168, tmp178, tmp179)
tmp181 = tmp9 & tmp167
tmp182 = tmp17 & tmp181
tmp183 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp182 & xmask, other=0.0)
tmp184 = tmp22 + tmp183
tmp185 = tl.full(tmp184.shape, 0.0, tmp184.dtype)
tmp186 = tl.where(tmp182, tmp184, tmp185)
tmp187 = tl.where(tmp17, tmp186, tmp22)
tmp188 = tl.full(tmp187.shape, 0.0, tmp187.dtype)
tmp189 = tl.where(tmp181, tmp187, tmp188)
tmp190 = tl.where(tmp9, tmp189, tmp22)
tmp191 = tl.where(tmp17, tmp180, tmp190)
tmp192 = tl.full(tmp191.shape, 0.0, tmp191.dtype)
tmp193 = tl.where(tmp167, tmp191, tmp192)
tmp194 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp168 & xmask, other=0.0)
tmp195 = tmp22 + tmp194
tmp196 = tl.full(tmp195.shape, 0.0, tmp195.dtype)
tmp197 = tl.where(tmp168, tmp195, tmp196)
tmp198 = tl.where(tmp17, tmp197, tmp22)
tmp199 = tl.full(tmp198.shape, 0.0, tmp198.dtype)
tmp200 = tl.where(tmp167, tmp198, tmp199)
tmp201 = tl.where(tmp9, tmp200, tmp22)
tmp202 = tl.where(tmp9, tmp193, tmp201)
tmp203 = tl.where(tmp12, tmp166, tmp202)
tmp204 = tl.full(tmp203.shape, 0.0, tmp203.dtype)
tmp205 = tl.where(tmp125, tmp203, tmp204)
tmp206 = tmp17 & tmp125
tmp207 = tmp9 & tmp206
tmp208 = tmp17 & tmp207
tmp209 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp208 & xmask, other=0.0)
tmp210 = tmp22 + tmp209
tmp211 = tl.full(tmp210.shape, 0.0, tmp210.dtype)
tmp212 = tl.where(tmp208, tmp210, tmp211)
tmp213 = tl.where(tmp17, tmp212, tmp22)
tmp214 = tl.full(tmp213.shape, 0.0, tmp213.dtype)
tmp215 = tl.where(tmp207, tmp213, tmp214)
tmp216 = tl.where(tmp9, tmp215, tmp22)
tmp217 = tl.full(tmp216.shape, 0.0, tmp216.dtype)
tmp218 = tl.where(tmp206, tmp216, tmp217)
tmp219 = tl.where(tmp17, tmp218, tmp201)
tmp220 = tl.full(tmp219.shape, 0.0, tmp219.dtype)
tmp221 = tl.where(tmp125, tmp219, tmp220)
tmp222 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp206 & xmask, other=0.0)
tmp223 = tmp22 + tmp222
tmp224 = tl.full(tmp223.shape, 0.0, tmp223.dtype)
tmp225 = tl.where(tmp206, tmp223, tmp224)
tmp226 = tl.where(tmp17, tmp225, tmp22)
tmp227 = tl.full(tmp226.shape, 0.0, tmp226.dtype)
tmp228 = tl.where(tmp125, tmp226, tmp227)
tmp229 = tl.where(tmp9, tmp228, tmp22)
tmp230 = tl.where(tmp9, tmp221, tmp229)
tmp231 = tl.where(tmp9, tmp205, tmp230)
tmp232 = tl.where(tmp12, tmp124, tmp231)
tmp233 = tl.full(tmp232.shape, 0.0, tmp232.dtype)
tmp234 = tl.where(tmp9, tmp232, tmp233)
tmp235 = tl.load(in_ptr1 + (12 + x0 + 4 * x1), tmp13 & xmask, other=0.0)
tmp236 = tmp121 + tmp235
tmp237 = tl.full(tmp236.shape, 0.0, tmp236.dtype)
tmp238 = tl.where(tmp13, tmp236, tmp237)
tmp239 = tl.where(tmp12, tmp238, tmp230)
tmp240 = tl.full(tmp239.shape, 0.0, tmp239.dtype)
tmp241 = tl.where(tmp9, tmp239, tmp240)
tmp242 = tmp17 & tmp9
tmp243 = tmp9 & tmp242
tmp244 = tmp17 & tmp243
tmp245 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp244 & xmask, other=0.0)
tmp246 = tmp22 + tmp245
tmp247 = tl.full(tmp246.shape, 0.0, tmp246.dtype)
tmp248 = tl.where(tmp244, tmp246, tmp247)
tmp249 = tl.where(tmp17, tmp248, tmp22)
tmp250 = tl.full(tmp249.shape, 0.0, tmp249.dtype)
tmp251 = tl.where(tmp243, tmp249, tmp250)
tmp252 = tl.where(tmp9, tmp251, tmp22)
tmp253 = tl.full(tmp252.shape, 0.0, tmp252.dtype)
tmp254 = tl.where(tmp242, tmp252, tmp253)
tmp255 = tl.where(tmp17, tmp254, tmp229)
tmp256 = tl.full(tmp255.shape, 0.0, tmp255.dtype)
tmp257 = tl.where(tmp9, tmp255, tmp256)
tmp258 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp242 & xmask, other=0.0)
tmp259 = tmp22 + tmp258
tmp260 = tl.full(tmp259.shape, 0.0, tmp259.dtype)
tmp261 = tl.where(tmp242, tmp259, tmp260)
tmp262 = tl.where(tmp17, tmp261, tmp22)
tmp263 = tl.full(tmp262.shape, 0.0, tmp262.dtype)
tmp264 = tl.where(tmp9, tmp262, tmp263)
tmp265 = tl.where(tmp9, tmp264, tmp22)
tmp266 = tl.where(tmp9, tmp257, tmp265)
tmp267 = tl.where(tmp9, tmp241, tmp266)
tmp268 = tl.where(tmp9, tmp234, tmp267)
tmp269 = tl.where(tmp5, tmp6, tmp268)
tl.store(out_ptr0 + x2, tmp269, xmask)
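# In-place pointwise kernel over a 16x16 block (xnumel = 256). Consistent with the
# "add_zeros" name, it appears to build the result from a zero base plus mask-gated
# slices of in_ptr1 (offsets x0 + 4*x1, 12 + ..., 36 + ..., 48 + ...), taking in_ptr0
# directly where the row predicate x1 < 4 holds; row ranges (x1 < 4, 4 <= x1 < 8) and
# column ranges on x0 select which contribution is stored.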
@triton.jit
def triton_poi_fused_add_zeros_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16
x2 = xindex
x0 = xindex % 16
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + x2, tmp2 & xmask, other=0.0)
tmp4 = x0
tmp5 = tmp4 >= tmp1
tmp6 = tl.full([1], 8, tl.int64)
tmp7 = tmp4 < tmp6
tmp8 = tmp5 & tmp7
tmp9 = tmp8 & tmp2
tmp10 = tmp2 & tmp9
tmp11 = tmp8 & tmp10
tmp12 = tmp2 & tmp11
tmp13 = tmp4 < tmp1
tmp14 = tmp13 & tmp12
tmp15 = tmp2 & tmp14
tmp16 = tmp13 & tmp15
tmp17 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp16 & xmask, other=0.0)
tmp18 = 0.0
tmp19 = tmp18 + tmp17
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp16, tmp19, tmp20)
tmp22 = tl.where(tmp13, tmp21, tmp18)
tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype)
tmp24 = tl.where(tmp15, tmp22, tmp23)
tmp25 = tl.where(tmp2, tmp24, tmp18)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp14, tmp25, tmp26)
tmp28 = tmp2 & tmp12
tmp29 = tmp13 & tmp28
tmp30 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp29 & xmask, other=0.0)
tmp31 = tmp18 + tmp30
tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype)
tmp33 = tl.where(tmp29, tmp31, tmp32)
tmp34 = tl.where(tmp13, tmp33, tmp18)
tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype)
tmp36 = tl.where(tmp28, tmp34, tmp35)
tmp37 = tl.where(tmp2, tmp36, tmp18)
tmp38 = tl.where(tmp13, tmp27, tmp37)
tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
tmp40 = tl.where(tmp12, tmp38, tmp39)
tmp41 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp14 & xmask, other=0.0)
tmp42 = tmp18 + tmp41
tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype)
tmp44 = tl.where(tmp14, tmp42, tmp43)
tmp45 = tl.where(tmp13, tmp44, tmp18)
tmp46 = tl.full(tmp45.shape, 0.0, tmp45.dtype)
tmp47 = tl.where(tmp12, tmp45, tmp46)
tmp48 = tl.where(tmp2, tmp47, tmp18)
tmp49 = tl.where(tmp2, tmp40, tmp48)
tmp50 = tl.load(in_ptr1 + (12 + x0 + 4 * x1), tmp11 & xmask, other=0.0)
tmp51 = tmp49 + tmp50
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp11, tmp51, tmp52)
tmp54 = tmp2 & tmp10
tmp55 = tmp13 & tmp54
tmp56 = tmp2 & tmp55
tmp57 = tmp13 & tmp56
tmp58 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp57 & xmask, other=0.0)
tmp59 = tmp18 + tmp58
tmp60 = tl.full(tmp59.shape, 0.0, tmp59.dtype)
tmp61 = tl.where(tmp57, tmp59, tmp60)
tmp62 = tl.where(tmp13, tmp61, tmp18)
tmp63 = tl.full(tmp62.shape, 0.0, tmp62.dtype)
tmp64 = tl.where(tmp56, tmp62, tmp63)
tmp65 = tl.where(tmp2, tmp64, tmp18)
tmp66 = tl.full(tmp65.shape, 0.0, tmp65.dtype)
tmp67 = tl.where(tmp55, tmp65, tmp66)
tmp68 = tmp2 & tmp54
tmp69 = tmp13 & tmp68
tmp70 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp69 & xmask, other=0.0)
tmp71 = tmp18 + tmp70
tmp72 = tl.full(tmp71.shape, 0.0, tmp71.dtype)
tmp73 = tl.where(tmp69, tmp71, tmp72)
tmp74 = tl.where(tmp13, tmp73, tmp18)
tmp75 = tl.full(tmp74.shape, 0.0, tmp74.dtype)
tmp76 = tl.where(tmp68, tmp74, tmp75)
tmp77 = tl.where(tmp2, tmp76, tmp18)
tmp78 = tl.where(tmp13, tmp67, tmp77)
tmp79 = tl.full(tmp78.shape, 0.0, tmp78.dtype)
tmp80 = tl.where(tmp54, tmp78, tmp79)
tmp81 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp55 & xmask, other=0.0)
tmp82 = tmp18 + tmp81
tmp83 = tl.full(tmp82.shape, 0.0, tmp82.dtype)
tmp84 = tl.where(tmp55, tmp82, tmp83)
tmp85 = tl.where(tmp13, tmp84, tmp18)
tmp86 = tl.full(tmp85.shape, 0.0, tmp85.dtype)
tmp87 = tl.where(tmp54, tmp85, tmp86)
tmp88 = tl.where(tmp2, tmp87, tmp18)
tmp89 = tl.where(tmp2, tmp80, tmp88)
tmp90 = tl.where(tmp8, tmp53, tmp89)
tmp91 = tl.full(tmp90.shape, 0.0, tmp90.dtype)
tmp92 = tl.where(tmp10, tmp90, tmp91)
tmp93 = tmp13 & tmp10
tmp94 = tmp2 & tmp93
tmp95 = tmp13 & tmp94
tmp96 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp95 & xmask, other=0.0)
tmp97 = tmp18 + tmp96
tmp98 = tl.full(tmp97.shape, 0.0, tmp97.dtype)
tmp99 = tl.where(tmp95, tmp97, tmp98)
tmp100 = tl.where(tmp13, tmp99, tmp18)
tmp101 = tl.full(tmp100.shape, 0.0, tmp100.dtype)
tmp102 = tl.where(tmp94, tmp100, tmp101)
tmp103 = tl.where(tmp2, tmp102, tmp18)
tmp104 = tl.full(tmp103.shape, 0.0, tmp103.dtype)
tmp105 = tl.where(tmp93, tmp103, tmp104)
tmp106 = tl.where(tmp13, tmp105, tmp88)
tmp107 = tl.full(tmp106.shape, 0.0, tmp106.dtype)
tmp108 = tl.where(tmp10, tmp106, tmp107)
tmp109 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp93 & xmask, other=0.0)
tmp110 = tmp18 + tmp109
tmp111 = tl.full(tmp110.shape, 0.0, tmp110.dtype)
tmp112 = tl.where(tmp93, tmp110, tmp111)
tmp113 = tl.where(tmp13, tmp112, tmp18)
tmp114 = tl.full(tmp113.shape, 0.0, tmp113.dtype)
tmp115 = tl.where(tmp10, tmp113, tmp114)
tmp116 = tl.where(tmp2, tmp115, tmp18)
tmp117 = tl.where(tmp2, tmp108, tmp116)
tmp118 = tl.where(tmp2, tmp92, tmp117)
tmp119 = tl.full(tmp118.shape, 0.0, tmp118.dtype)
tmp120 = tl.where(tmp9, tmp118, tmp119)
tmp121 = tmp2 & tmp2
tmp122 = tmp8 & tmp121
tmp123 = tmp2 & tmp122
tmp124 = tmp13 & tmp123
tmp125 = tmp2 & tmp124
tmp126 = tmp13 & tmp125
tmp127 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp126 & xmask, other=0.0)
tmp128 = tmp18 + tmp127
tmp129 = tl.full(tmp128.shape, 0.0, tmp128.dtype)
tmp130 = tl.where(tmp126, tmp128, tmp129)
tmp131 = tl.where(tmp13, tmp130, tmp18)
tmp132 = tl.full(tmp131.shape, 0.0, tmp131.dtype)
tmp133 = tl.where(tmp125, tmp131, tmp132)
tmp134 = tl.where(tmp2, tmp133, tmp18)
tmp135 = tl.full(tmp134.shape, 0.0, tmp134.dtype)
tmp136 = tl.where(tmp124, tmp134, tmp135)
tmp137 = tmp2 & tmp123
tmp138 = tmp13 & tmp137
tmp139 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp138 & xmask, other=0.0)
tmp140 = tmp18 + tmp139
tmp141 = tl.full(tmp140.shape, 0.0, tmp140.dtype)
tmp142 = tl.where(tmp138, tmp140, tmp141)
tmp143 = tl.where(tmp13, tmp142, tmp18)
tmp144 = tl.full(tmp143.shape, 0.0, tmp143.dtype)
tmp145 = tl.where(tmp137, tmp143, tmp144)
tmp146 = tl.where(tmp2, tmp145, tmp18)
tmp147 = tl.where(tmp13, tmp136, tmp146)
tmp148 = tl.full(tmp147.shape, 0.0, tmp147.dtype)
tmp149 = tl.where(tmp123, tmp147, tmp148)
tmp150 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp124 & xmask, other=0.0)
tmp151 = tmp18 + tmp150
tmp152 = tl.full(tmp151.shape, 0.0, tmp151.dtype)
tmp153 = tl.where(tmp124, tmp151, tmp152)
tmp154 = tl.where(tmp13, tmp153, tmp18)
tmp155 = tl.full(tmp154.shape, 0.0, tmp154.dtype)
tmp156 = tl.where(tmp123, tmp154, tmp155)
tmp157 = tl.where(tmp2, tmp156, tmp18)
tmp158 = tl.where(tmp2, tmp149, tmp157)
tmp159 = tl.load(in_ptr1 + (12 + x0 + 4 * x1), tmp122 & xmask, other=0.0)
tmp160 = tmp158 + tmp159
tmp161 = tl.full(tmp160.shape, 0.0, tmp160.dtype)
tmp162 = tl.where(tmp122, tmp160, tmp161)
tmp163 = tmp2 & tmp121
tmp164 = tmp13 & tmp163
tmp165 = tmp2 & tmp164
tmp166 = tmp13 & tmp165
tmp167 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp166 & xmask, other=0.0)
tmp168 = tmp18 + tmp167
tmp169 = tl.full(tmp168.shape, 0.0, tmp168.dtype)
tmp170 = tl.where(tmp166, tmp168, tmp169)
tmp171 = tl.where(tmp13, tmp170, tmp18)
tmp172 = tl.full(tmp171.shape, 0.0, tmp171.dtype)
tmp173 = tl.where(tmp165, tmp171, tmp172)
tmp174 = tl.where(tmp2, tmp173, tmp18)
tmp175 = tl.full(tmp174.shape, 0.0, tmp174.dtype)
tmp176 = tl.where(tmp164, tmp174, tmp175)
tmp177 = tmp2 & tmp163
tmp178 = tmp13 & tmp177
tmp179 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp178 & xmask, other=0.0)
tmp180 = tmp18 + tmp179
tmp181 = tl.full(tmp180.shape, 0.0, tmp180.dtype)
tmp182 = tl.where(tmp178, tmp180, tmp181)
tmp183 = tl.where(tmp13, tmp182, tmp18)
tmp184 = tl.full(tmp183.shape, 0.0, tmp183.dtype)
tmp185 = tl.where(tmp177, tmp183, tmp184)
tmp186 = tl.where(tmp2, tmp185, tmp18)
tmp187 = tl.where(tmp13, tmp176, tmp186)
tmp188 = tl.full(tmp187.shape, 0.0, tmp187.dtype)
tmp189 = tl.where(tmp163, tmp187, tmp188)
tmp190 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp164 & xmask, other=0.0)
tmp191 = tmp18 + tmp190
tmp192 = tl.full(tmp191.shape, 0.0, tmp191.dtype)
tmp193 = tl.where(tmp164, tmp191, tmp192)
tmp194 = tl.where(tmp13, tmp193, tmp18)
tmp195 = tl.full(tmp194.shape, 0.0, tmp194.dtype)
tmp196 = tl.where(tmp163, tmp194, tmp195)
tmp197 = tl.where(tmp2, tmp196, tmp18)
tmp198 = tl.where(tmp2, tmp189, tmp197)
tmp199 = tl.where(tmp8, tmp162, tmp198)
tmp200 = tl.full(tmp199.shape, 0.0, tmp199.dtype)
tmp201 = tl.where(tmp121, tmp199, tmp200)
tmp202 = tmp13 & tmp121
tmp203 = tmp2 & tmp202
tmp204 = tmp13 & tmp203
tmp205 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp204 & xmask, other=0.0)
tmp206 = tmp18 + tmp205
tmp207 = tl.full(tmp206.shape, 0.0, tmp206.dtype)
tmp208 = tl.where(tmp204, tmp206, tmp207)
tmp209 = tl.where(tmp13, tmp208, tmp18)
tmp210 = tl.full(tmp209.shape, 0.0, tmp209.dtype)
tmp211 = tl.where(tmp203, tmp209, tmp210)
tmp212 = tl.where(tmp2, tmp211, tmp18)
tmp213 = tl.full(tmp212.shape, 0.0, tmp212.dtype)
tmp214 = tl.where(tmp202, tmp212, tmp213)
tmp215 = tl.where(tmp13, tmp214, tmp197)
tmp216 = tl.full(tmp215.shape, 0.0, tmp215.dtype)
tmp217 = tl.where(tmp121, tmp215, tmp216)
tmp218 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp202 & xmask, other=0.0)
tmp219 = tmp18 + tmp218
tmp220 = tl.full(tmp219.shape, 0.0, tmp219.dtype)
tmp221 = tl.where(tmp202, tmp219, tmp220)
tmp222 = tl.where(tmp13, tmp221, tmp18)
tmp223 = tl.full(tmp222.shape, 0.0, tmp222.dtype)
tmp224 = tl.where(tmp121, tmp222, tmp223)
tmp225 = tl.where(tmp2, tmp224, tmp18)
tmp226 = tl.where(tmp2, tmp217, tmp225)
tmp227 = tl.where(tmp2, tmp201, tmp226)
tmp228 = tl.where(tmp8, tmp120, tmp227)
tmp229 = tl.full(tmp228.shape, 0.0, tmp228.dtype)
tmp230 = tl.where(tmp2, tmp228, tmp229)
tmp231 = tl.load(in_ptr1 + (12 + x0 + 4 * x1), tmp9 & xmask, other=0.0)
tmp232 = tmp117 + tmp231
tmp233 = tl.full(tmp232.shape, 0.0, tmp232.dtype)
tmp234 = tl.where(tmp9, tmp232, tmp233)
tmp235 = tl.where(tmp8, tmp234, tmp226)
tmp236 = tl.full(tmp235.shape, 0.0, tmp235.dtype)
tmp237 = tl.where(tmp2, tmp235, tmp236)
tmp238 = tmp13 & tmp2
tmp239 = tmp2 & tmp238
tmp240 = tmp13 & tmp239
tmp241 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp240 & xmask, other=0.0)
tmp242 = tmp18 + tmp241
tmp243 = tl.full(tmp242.shape, 0.0, tmp242.dtype)
tmp244 = tl.where(tmp240, tmp242, tmp243)
tmp245 = tl.where(tmp13, tmp244, tmp18)
tmp246 = tl.full(tmp245.shape, 0.0, tmp245.dtype)
tmp247 = tl.where(tmp239, tmp245, tmp246)
tmp248 = tl.where(tmp2, tmp247, tmp18)
tmp249 = tl.full(tmp248.shape, 0.0, tmp248.dtype)
tmp250 = tl.where(tmp238, tmp248, tmp249)
tmp251 = tl.where(tmp13, tmp250, tmp225)
tmp252 = tl.full(tmp251.shape, 0.0, tmp251.dtype)
tmp253 = tl.where(tmp2, tmp251, tmp252)
tmp254 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp238 & xmask, other=0.0)
tmp255 = tmp18 + tmp254
tmp256 = tl.full(tmp255.shape, 0.0, tmp255.dtype)
tmp257 = tl.where(tmp238, tmp255, tmp256)
tmp258 = tl.where(tmp13, tmp257, tmp18)
tmp259 = tl.full(tmp258.shape, 0.0, tmp258.dtype)
tmp260 = tl.where(tmp2, tmp258, tmp259)
tmp261 = tl.where(tmp2, tmp260, tmp18)
tmp262 = tl.where(tmp2, tmp253, tmp261)
tmp263 = tl.where(tmp2, tmp237, tmp262)
tmp264 = tl.where(tmp2, tmp230, tmp263)
tmp265 = tl.where(tmp2, tmp3, tmp264)
tmp266 = tmp0 >= tmp1
tmp267 = tmp0 < tmp6
tmp268 = tmp266 & tmp267
tmp269 = tmp13 & tmp268
tmp270 = tmp2 & tmp269
tmp271 = tl.full([1], 12, tl.int64)
tmp272 = tmp4 >= tmp271
tmp273 = tmp272 & tmp270
tmp274 = tmp2 & tmp273
tmp275 = tmp272 & tmp274
tmp276 = tmp2 & tmp275
tmp277 = tmp4 >= tmp6
tmp278 = tmp4 < tmp271
tmp279 = tmp277 & tmp278
tmp280 = tmp279 & tmp276
tmp281 = tl.where(tmp279, tmp265, tmp265)
tmp282 = tl.full(tmp281.shape, 0.0, tmp281.dtype)
tmp283 = tl.where(tmp276, tmp281, tmp282)
tmp284 = tl.where(tmp2, tmp283, tmp265)
tmp285 = tl.load(in_ptr1 + (36 + x0 + 4 * x1), tmp275 & xmask, other=0.0)
tmp286 = tmp284 + tmp285
tmp287 = tl.full(tmp286.shape, 0.0, tmp286.dtype)
tmp288 = tl.where(tmp275, tmp286, tmp287)
tmp289 = tmp2 & tmp274
tmp290 = tmp279 & tmp289
tmp291 = tl.where(tmp289, tmp281, tmp282)
tmp292 = tl.where(tmp2, tmp291, tmp265)
tmp293 = tl.where(tmp272, tmp288, tmp292)
tmp294 = tl.full(tmp293.shape, 0.0, tmp293.dtype)
tmp295 = tl.where(tmp274, tmp293, tmp294)
tmp296 = tmp279 & tmp274
tmp297 = tl.where(tmp274, tmp281, tmp282)
tmp298 = tl.where(tmp2, tmp297, tmp265)
tmp299 = tl.where(tmp2, tmp295, tmp298)
tmp300 = tl.full(tmp299.shape, 0.0, tmp299.dtype)
tmp301 = tl.where(tmp273, tmp299, tmp300)
tmp302 = tmp2 & tmp270
tmp303 = tmp272 & tmp302
tmp304 = tmp2 & tmp303
tmp305 = tmp279 & tmp304
tmp306 = tl.where(tmp304, tmp281, tmp282)
tmp307 = tl.where(tmp2, tmp306, tmp265)
tmp308 = tl.load(in_ptr1 + (36 + x0 + 4 * x1), tmp303 & xmask, other=0.0)
tmp309 = tmp307 + tmp308
tmp310 = tl.full(tmp309.shape, 0.0, tmp309.dtype)
tmp311 = tl.where(tmp303, tmp309, tmp310)
tmp312 = tmp2 & tmp302
tmp313 = tmp279 & tmp312
tmp314 = tl.where(tmp312, tmp281, tmp282)
tmp315 = tl.where(tmp2, tmp314, tmp265)
tmp316 = tl.where(tmp272, tmp311, tmp315)
tmp317 = tl.full(tmp316.shape, 0.0, tmp316.dtype)
tmp318 = tl.where(tmp302, tmp316, tmp317)
tmp319 = tmp279 & tmp302
tmp320 = tl.where(tmp302, tmp281, tmp282)
tmp321 = tl.where(tmp2, tmp320, tmp265)
tmp322 = tl.where(tmp2, tmp318, tmp321)
tmp323 = tl.where(tmp272, tmp301, tmp322)
tmp324 = tl.full(tmp323.shape, 0.0, tmp323.dtype)
tmp325 = tl.where(tmp270, tmp323, tmp324)
tmp326 = tl.load(in_ptr1 + (36 + x0 + 4 * x1), tmp273 & xmask, other=0.0)
tmp327 = tmp298 + tmp326
tmp328 = tl.full(tmp327.shape, 0.0, tmp327.dtype)
tmp329 = tl.where(tmp273, tmp327, tmp328)
tmp330 = tl.where(tmp272, tmp329, tmp321)
tmp331 = tl.full(tmp330.shape, 0.0, tmp330.dtype)
tmp332 = tl.where(tmp270, tmp330, tmp331)
tmp333 = tmp279 & tmp270
tmp334 = tl.where(tmp270, tmp281, tmp282)
tmp335 = tl.where(tmp2, tmp334, tmp265)
tmp336 = tl.where(tmp2, tmp332, tmp335)
tmp337 = tl.where(tmp2, tmp325, tmp336)
tmp338 = tl.load(in_ptr1 + (48 + x0 + 4 * x1), tmp269 & xmask, other=0.0)
tmp339 = tmp337 + tmp338
tmp340 = tl.full(tmp339.shape, 0.0, tmp339.dtype)
tmp341 = tl.where(tmp269, tmp339, tmp340)
tmp342 = tmp2 & tmp268
tmp343 = tmp272 & tmp342
tmp344 = tmp2 & tmp343
tmp345 = tmp272 & tmp344
tmp346 = tmp2 & tmp345
tmp347 = tmp279 & tmp346
tmp348 = tl.where(tmp346, tmp281, tmp282)
tmp349 = tl.where(tmp2, tmp348, tmp265)
tmp350 = tl.load(in_ptr1 + (36 + x0 + 4 * x1), tmp345 & xmask, other=0.0)
tmp351 = tmp349 + tmp350
tmp352 = tl.full(tmp351.shape, 0.0, tmp351.dtype)
tmp353 = tl.where(tmp345, tmp351, tmp352)
tmp354 = tmp2 & tmp344
tmp355 = tmp279 & tmp354
tmp356 = tl.where(tmp354, tmp281, tmp282)
tmp357 = tl.where(tmp2, tmp356, tmp265)
tmp358 = tl.where(tmp272, tmp353, tmp357)
tmp359 = tl.full(tmp358.shape, 0.0, tmp358.dtype)
tmp360 = tl.where(tmp344, tmp358, tmp359)
tmp361 = tmp279 & tmp344
tmp362 = tl.where(tmp344, tmp281, tmp282)
tmp363 = tl.where(tmp2, tmp362, tmp265)
tmp364 = tl.where(tmp2, tmp360, tmp363)
tmp365 = tl.full(tmp364.shape, 0.0, tmp364.dtype)
tmp366 = tl.where(tmp343, tmp364, tmp365)
tmp367 = tmp2 & tmp342
tmp368 = tmp272 & tmp367
tmp369 = tmp2 & tmp368
tmp370 = tmp279 & tmp369
tmp371 = tl.where(tmp369, tmp281, tmp282)
tmp372 = tl.where(tmp2, tmp371, tmp265)
tmp373 = tl.load(in_ptr1 + (36 + x0 + 4 * x1), tmp368 & xmask, other=0.0)
tmp374 = tmp372 + tmp373
tmp375 = tl.full(tmp374.shape, 0.0, tmp374.dtype)
tmp376 = tl.where(tmp368, tmp374, tmp375)
tmp377 = tmp2 & tmp367
tmp378 = tmp279 & tmp377
tmp379 = tl.where(tmp377, tmp281, tmp282)
tmp380 = tl.where(tmp2, tmp379, tmp265)
tmp381 = tl.where(tmp272, tmp376, tmp380)
tmp382 = tl.full(tmp381.shape, 0.0, tmp381.dtype)
tmp383 = tl.where(tmp367, tmp381, tmp382)
tmp384 = tmp279 & tmp367
tmp385 = tl.where(tmp367, tmp281, tmp282)
tmp386 = tl.where(tmp2, tmp385, tmp265)
tmp387 = tl.where(tmp2, tmp383, tmp386)
tmp388 = tl.where(tmp272, tmp366, tmp387)
tmp389 = tl.full(tmp388.shape, 0.0, tmp388.dtype)
tmp390 = tl.where(tmp342, tmp388, tmp389)
tmp391 = tl.load(in_ptr1 + (36 + x0 + 4 * x1), tmp343 & xmask, other=0.0)
tmp392 = tmp363 + tmp391
tmp393 = tl.full(tmp392.shape, 0.0, tmp392.dtype)
tmp394 = tl.where(tmp343, tmp392, tmp393)
tmp395 = tl.where(tmp272, tmp394, tmp386)
tmp396 = tl.full(tmp395.shape, 0.0, tmp395.dtype)
tmp397 = tl.where(tmp342, tmp395, tmp396)
tmp398 = tmp279 & tmp342
tmp399 = tl.where(tmp342, tmp281, tmp282)
tmp400 = tl.where(tmp2, tmp399, tmp265)
tmp401 = tl.where(tmp2, tmp397, tmp400)
tmp402 = tl.where(tmp2, tmp390, tmp401)
tmp403 = tl.where(tmp13, tmp341, tmp402)
tmp404 = tl.full(tmp403.shape, 0.0, tmp403.dtype)
tmp405 = tl.where(tmp268, tmp403, tmp404)
tmp406 = tmp272 & tmp2
tmp407 = tmp2 & tmp406
tmp408 = tmp272 & tmp407
tmp409 = tmp2 & tmp408
tmp410 = tmp279 & tmp409
tmp411 = tl.where(tmp409, tmp281, tmp282)
tmp412 = tl.where(tmp2, tmp411, tmp265)
tmp413 = tl.load(in_ptr1 + (36 + x0 + 4 * x1), tmp408 & xmask, other=0.0)
tmp414 = tmp412 + tmp413
tmp415 = tl.full(tmp414.shape, 0.0, tmp414.dtype)
tmp416 = tl.where(tmp408, tmp414, tmp415)
tmp417 = tmp2 & tmp407
tmp418 = tmp279 & tmp417
tmp419 = tl.where(tmp417, tmp281, tmp282)
tmp420 = tl.where(tmp2, tmp419, tmp265)
tmp421 = tl.where(tmp272, tmp416, tmp420)
tmp422 = tl.full(tmp421.shape, 0.0, tmp421.dtype)
tmp423 = tl.where(tmp407, tmp421, tmp422)
tmp424 = tmp279 & tmp407
tmp425 = tl.where(tmp407, tmp281, tmp282)
tmp426 = tl.where(tmp2, tmp425, tmp265)
tmp427 = tl.where(tmp2, tmp423, tmp426)
tmp428 = tl.full(tmp427.shape, 0.0, tmp427.dtype)
tmp429 = tl.where(tmp406, tmp427, tmp428)
tmp430 = tmp272 & tmp121
tmp431 = tmp2 & tmp430
tmp432 = tmp279 & tmp431
tmp433 = tl.where(tmp431, tmp281, tmp282)
tmp434 = tl.where(tmp2, tmp433, tmp265)
tmp435 = tl.load(in_ptr1 + (36 + x0 + 4 * x1), tmp430 & xmask, other=0.0)
tmp436 = tmp434 + tmp435
tmp437 = tl.full(tmp436.shape, 0.0, tmp436.dtype)
tmp438 = tl.where(tmp430, tmp436, tmp437)
tmp439 = tmp279 & tmp163
tmp440 = tl.where(tmp163, tmp281, tmp282)
tmp441 = tl.where(tmp2, tmp440, tmp265)
tmp442 = tl.where(tmp272, tmp438, tmp441)
tmp443 = tl.full(tmp442.shape, 0.0, tmp442.dtype)
tmp444 = tl.where(tmp121, tmp442, tmp443)
tmp445 = tmp279 & tmp121
tmp446 = tl.where(tmp121, tmp281, tmp282)
tmp447 = tl.where(tmp2, tmp446, tmp265)
tmp448 = tl.where(tmp2, tmp444, tmp447)
tmp449 = tl.where(tmp272, tmp429, tmp448)
tmp450 = tl.full(tmp449.shape, 0.0, tmp449.dtype)
tmp451 = tl.where(tmp2, tmp449, tmp450)
tmp452 = tl.load(in_ptr1 + (36 + x0 + 4 * x1), tmp406 & xmask, other=0.0)
tmp453 = tmp426 + tmp452
tmp454 = tl.full(tmp453.shape, 0.0, tmp453.dtype)
tmp455 = tl.where(tmp406, tmp453, tmp454)
tmp456 = tl.where(tmp272, tmp455, tmp447)
tmp457 = tl.full(tmp456.shape, 0.0, tmp456.dtype)
tmp458 = tl.where(tmp2, tmp456, tmp457)
tmp459 = tmp279 & tmp2
tmp460 = tl.where(tmp2, tmp281, tmp282)
tmp461 = tl.where(tmp2, tmp460, tmp265)
tmp462 = tl.where(tmp2, tmp458, tmp461)
tmp463 = tl.where(tmp2, tmp451, tmp462)
tmp464 = tl.where(tmp268, tmp405, tmp463)
tl.store(in_out_ptr0 + x2, tmp464, xmask)
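# Pointwise kernel over the second 4x16 slab of in_ptr0 (offset 64, xnumel = 64).
# Columns 8..11 appear to receive additive in_ptr1 contributions (offsets 76 and
# 88 + x0 + 4*x1); two closely related select chains are written to out_ptr0 and
# out_ptr1.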
@triton.jit
def triton_poi_fused_add_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp200 = tl.load(in_ptr0 + (64 + x2), xmask)
tmp0 = x0
tmp1 = tl.full([1], 8, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 12, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = 4 + x1
tmp7 = tl.full([1], 4, tl.int64)
tmp8 = tmp6 >= tmp7
tmp9 = tmp6 < tmp1
tmp10 = tmp8 & tmp9
tmp11 = tmp10 & tmp5
tmp12 = tmp0 >= tmp7
tmp13 = tmp0 < tmp1
tmp14 = tmp12 & tmp13
tmp15 = tmp14 & tmp11
tmp16 = tmp10 & tmp15
tmp17 = tmp14 & tmp16
tmp18 = tmp10 & tmp17
tmp19 = tmp0 < tmp7
tmp20 = tmp19 & tmp18
tmp21 = tl.load(in_ptr0 + (64 + x2), tmp20 & xmask, other=0.0)
tmp22 = tl.load(in_ptr0 + (64 + x2), tmp18 & xmask, other=0.0)
tmp23 = tl.where(tmp19, tmp21, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp18, tmp23, tmp24)
tmp26 = tl.load(in_ptr0 + (64 + x2), tmp17 & xmask, other=0.0)
tmp27 = tl.where(tmp10, tmp25, tmp26)
tmp28 = tl.load(in_ptr1 + (76 + x0 + 4 * x1), tmp17 & xmask, other=0.0)
tmp29 = tmp27 + tmp28
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp17, tmp29, tmp30)
tmp32 = tmp10 & tmp16
tmp33 = tmp19 & tmp32
tmp34 = tl.load(in_ptr0 + (64 + x2), tmp33 & xmask, other=0.0)
tmp35 = tl.load(in_ptr0 + (64 + x2), tmp32 & xmask, other=0.0)
tmp36 = tl.where(tmp19, tmp34, tmp35)
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp32, tmp36, tmp37)
tmp39 = tl.load(in_ptr0 + (64 + x2), tmp16 & xmask, other=0.0)
tmp40 = tl.where(tmp10, tmp38, tmp39)
tmp41 = tl.where(tmp14, tmp31, tmp40)
tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
tmp43 = tl.where(tmp16, tmp41, tmp42)
tmp44 = tmp19 & tmp16
tmp45 = tl.load(in_ptr0 + (64 + x2), tmp44 & xmask, other=0.0)
tmp46 = tl.where(tmp19, tmp45, tmp39)
tmp47 = tl.full(tmp46.shape, 0.0, tmp46.dtype)
tmp48 = tl.where(tmp16, tmp46, tmp47)
tmp49 = tl.load(in_ptr0 + (64 + x2), tmp15 & xmask, other=0.0)
tmp50 = tl.where(tmp10, tmp48, tmp49)
tmp51 = tl.where(tmp10, tmp43, tmp50)
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp15, tmp51, tmp52)
tmp54 = tmp10 & tmp11
tmp55 = tmp14 & tmp54
tmp56 = tmp10 & tmp55
tmp57 = tmp19 & tmp56
tmp58 = tl.load(in_ptr0 + (64 + x2), tmp57 & xmask, other=0.0)
tmp59 = tl.load(in_ptr0 + (64 + x2), tmp56 & xmask, other=0.0)
tmp60 = tl.where(tmp19, tmp58, tmp59)
tmp61 = tl.full(tmp60.shape, 0.0, tmp60.dtype)
tmp62 = tl.where(tmp56, tmp60, tmp61)
tmp63 = tl.load(in_ptr0 + (64 + x2), tmp55 & xmask, other=0.0)
tmp64 = tl.where(tmp10, tmp62, tmp63)
tmp65 = tl.load(in_ptr1 + (76 + x0 + 4 * x1), tmp55 & xmask, other=0.0)
tmp66 = tmp64 + tmp65
tmp67 = tl.full(tmp66.shape, 0.0, tmp66.dtype)
tmp68 = tl.where(tmp55, tmp66, tmp67)
tmp69 = tmp10 & tmp54
tmp70 = tmp19 & tmp69
tmp71 = tl.load(in_ptr0 + (64 + x2), tmp70 & xmask, other=0.0)
tmp72 = tl.load(in_ptr0 + (64 + x2), tmp69 & xmask, other=0.0)
tmp73 = tl.where(tmp19, tmp71, tmp72)
tmp74 = tl.full(tmp73.shape, 0.0, tmp73.dtype)
tmp75 = tl.where(tmp69, tmp73, tmp74)
tmp76 = tl.load(in_ptr0 + (64 + x2), tmp54 & xmask, other=0.0)
tmp77 = tl.where(tmp10, tmp75, tmp76)
tmp78 = tl.where(tmp14, tmp68, tmp77)
tmp79 = tl.full(tmp78.shape, 0.0, tmp78.dtype)
tmp80 = tl.where(tmp54, tmp78, tmp79)
tmp81 = tmp19 & tmp54
tmp82 = tl.load(in_ptr0 + (64 + x2), tmp81 & xmask, other=0.0)
tmp83 = tl.where(tmp19, tmp82, tmp76)
tmp84 = tl.full(tmp83.shape, 0.0, tmp83.dtype)
tmp85 = tl.where(tmp54, tmp83, tmp84)
tmp86 = tl.load(in_ptr0 + (64 + x2), tmp11 & xmask, other=0.0)
tmp87 = tl.where(tmp10, tmp85, tmp86)
tmp88 = tl.where(tmp10, tmp80, tmp87)
tmp89 = tl.where(tmp14, tmp53, tmp88)
tmp90 = tl.full(tmp89.shape, 0.0, tmp89.dtype)
tmp91 = tl.where(tmp11, tmp89, tmp90)
tmp92 = tl.load(in_ptr1 + (76 + x0 + 4 * x1), tmp15 & xmask, other=0.0)
tmp93 = tmp50 + tmp92
tmp94 = tl.full(tmp93.shape, 0.0, tmp93.dtype)
tmp95 = tl.where(tmp15, tmp93, tmp94)
tmp96 = tl.where(tmp14, tmp95, tmp87)
tmp97 = tl.full(tmp96.shape, 0.0, tmp96.dtype)
tmp98 = tl.where(tmp11, tmp96, tmp97)
tmp99 = tmp19 & tmp11
tmp100 = tl.load(in_ptr0 + (64 + x2), tmp99 & xmask, other=0.0)
tmp101 = tl.where(tmp19, tmp100, tmp86)
tmp102 = tl.full(tmp101.shape, 0.0, tmp101.dtype)
tmp103 = tl.where(tmp11, tmp101, tmp102)
tmp104 = tl.load(in_ptr0 + (64 + x2), tmp5 & xmask, other=0.0)
tmp105 = tl.where(tmp10, tmp103, tmp104)
tmp106 = tl.where(tmp10, tmp98, tmp105)
tmp107 = tl.where(tmp10, tmp91, tmp106)
tmp108 = tl.load(in_ptr1 + (88 + x0 + 4 * x1), tmp5 & xmask, other=0.0)
tmp109 = tmp107 + tmp108
tmp110 = tl.full(tmp109.shape, 0.0, tmp109.dtype)
tmp111 = tl.where(tmp5, tmp109, tmp110)
tmp112 = tmp14 & tmp10
tmp113 = tmp10 & tmp112
tmp114 = tmp14 & tmp113
tmp115 = tmp10 & tmp114
tmp116 = tmp19 & tmp115
tmp117 = tl.load(in_ptr0 + (64 + x2), tmp116 & xmask, other=0.0)
tmp118 = tl.load(in_ptr0 + (64 + x2), tmp115 & xmask, other=0.0)
tmp119 = tl.where(tmp19, tmp117, tmp118)
tmp120 = tl.full(tmp119.shape, 0.0, tmp119.dtype)
tmp121 = tl.where(tmp115, tmp119, tmp120)
tmp122 = tl.load(in_ptr0 + (64 + x2), tmp114 & xmask, other=0.0)
tmp123 = tl.where(tmp10, tmp121, tmp122)
tmp124 = tl.load(in_ptr1 + (76 + x0 + 4 * x1), tmp114 & xmask, other=0.0)
tmp125 = tmp123 + tmp124
tmp126 = tl.full(tmp125.shape, 0.0, tmp125.dtype)
tmp127 = tl.where(tmp114, tmp125, tmp126)
tmp128 = tmp10 & tmp113
tmp129 = tmp19 & tmp128
tmp130 = tl.load(in_ptr0 + (64 + x2), tmp129 & xmask, other=0.0)
tmp131 = tl.load(in_ptr0 + (64 + x2), tmp128 & xmask, other=0.0)
tmp132 = tl.where(tmp19, tmp130, tmp131)
tmp133 = tl.full(tmp132.shape, 0.0, tmp132.dtype)
tmp134 = tl.where(tmp128, tmp132, tmp133)
tmp135 = tl.load(in_ptr0 + (64 + x2), tmp113 & xmask, other=0.0)
tmp136 = tl.where(tmp10, tmp134, tmp135)
tmp137 = tl.where(tmp14, tmp127, tmp136)
tmp138 = tl.full(tmp137.shape, 0.0, tmp137.dtype)
tmp139 = tl.where(tmp113, tmp137, tmp138)
tmp140 = tmp19 & tmp113
tmp141 = tl.load(in_ptr0 + (64 + x2), tmp140 & xmask, other=0.0)
tmp142 = tl.where(tmp19, tmp141, tmp135)
tmp143 = tl.full(tmp142.shape, 0.0, tmp142.dtype)
tmp144 = tl.where(tmp113, tmp142, tmp143)
tmp145 = tl.load(in_ptr0 + (64 + x2), tmp112 & xmask, other=0.0)
tmp146 = tl.where(tmp10, tmp144, tmp145)
tmp147 = tl.where(tmp10, tmp139, tmp146)
tmp148 = tl.full(tmp147.shape, 0.0, tmp147.dtype)
tmp149 = tl.where(tmp112, tmp147, tmp148)
tmp150 = tmp10 & tmp10
tmp151 = tmp14 & tmp150
tmp152 = tmp10 & tmp151
tmp153 = tmp19 & tmp152
tmp154 = tl.load(in_ptr0 + (64 + x2), tmp153 & xmask, other=0.0)
tmp155 = tl.load(in_ptr0 + (64 + x2), tmp152 & xmask, other=0.0)
tmp156 = tl.where(tmp19, tmp154, tmp155)
tmp157 = tl.full(tmp156.shape, 0.0, tmp156.dtype)
tmp158 = tl.where(tmp152, tmp156, tmp157)
tmp159 = tl.load(in_ptr0 + (64 + x2), tmp151 & xmask, other=0.0)
tmp160 = tl.where(tmp10, tmp158, tmp159)
tmp161 = tl.load(in_ptr1 + (76 + x0 + 4 * x1), tmp151 & xmask, other=0.0)
tmp162 = tmp160 + tmp161
tmp163 = tl.full(tmp162.shape, 0.0, tmp162.dtype)
tmp164 = tl.where(tmp151, tmp162, tmp163)
tmp165 = tmp10 & tmp150
tmp166 = tmp19 & tmp165
tmp167 = tl.load(in_ptr0 + (64 + x2), tmp166 & xmask, other=0.0)
tmp168 = tl.load(in_ptr0 + (64 + x2), tmp165 & xmask, other=0.0)
tmp169 = tl.where(tmp19, tmp167, tmp168)
tmp170 = tl.full(tmp169.shape, 0.0, tmp169.dtype)
tmp171 = tl.where(tmp165, tmp169, tmp170)
tmp172 = tl.load(in_ptr0 + (64 + x2), tmp150 & xmask, other=0.0)
tmp173 = tl.where(tmp10, tmp171, tmp172)
tmp174 = tl.where(tmp14, tmp164, tmp173)
tmp175 = tl.full(tmp174.shape, 0.0, tmp174.dtype)
tmp176 = tl.where(tmp150, tmp174, tmp175)
tmp177 = tmp19 & tmp150
tmp178 = tl.load(in_ptr0 + (64 + x2), tmp177 & xmask, other=0.0)
tmp179 = tl.where(tmp19, tmp178, tmp172)
tmp180 = tl.full(tmp179.shape, 0.0, tmp179.dtype)
tmp181 = tl.where(tmp150, tmp179, tmp180)
tmp182 = tl.load(in_ptr0 + (64 + x2), tmp10 & xmask, other=0.0)
tmp183 = tl.where(tmp10, tmp181, tmp182)
tmp184 = tl.where(tmp10, tmp176, tmp183)
tmp185 = tl.where(tmp14, tmp149, tmp184)
tmp186 = tl.full(tmp185.shape, 0.0, tmp185.dtype)
tmp187 = tl.where(tmp10, tmp185, tmp186)
tmp188 = tl.load(in_ptr1 + (76 + x0 + 4 * x1), tmp112 & xmask, other=0.0)
tmp189 = tmp146 + tmp188
tmp190 = tl.full(tmp189.shape, 0.0, tmp189.dtype)
tmp191 = tl.where(tmp112, tmp189, tmp190)
tmp192 = tl.where(tmp14, tmp191, tmp183)
tmp193 = tl.full(tmp192.shape, 0.0, tmp192.dtype)
tmp194 = tl.where(tmp10, tmp192, tmp193)
tmp195 = tmp19 & tmp10
tmp196 = tl.load(in_ptr0 + (64 + x2), tmp195 & xmask, other=0.0)
tmp197 = tl.where(tmp19, tmp196, tmp182)
tmp198 = tl.full(tmp197.shape, 0.0, tmp197.dtype)
tmp199 = tl.where(tmp10, tmp197, tmp198)
tmp201 = tl.where(tmp10, tmp199, tmp200)
tmp202 = tl.where(tmp10, tmp194, tmp201)
tmp203 = tl.where(tmp10, tmp187, tmp202)
tmp204 = tl.where(tmp5, tmp111, tmp203)
tmp205 = tl.where(tmp10, tmp204, tmp107)
tmp206 = tl.full(tmp205.shape, 0.0, tmp205.dtype)
tmp207 = tl.where(tmp5, tmp205, tmp206)
tmp208 = tl.where(tmp10, tmp204, tmp203)
tmp209 = tl.where(tmp5, tmp207, tmp208)
tl.store(out_ptr0 + x2, tmp204, xmask)
tl.store(out_ptr1 + x2, tmp209, xmask)
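# In-place pointwise kernel over a 16x16 block (xnumel = 256). The final select chain
# appears to overwrite rows 4..7 with the preceding slab of in_ptr0 (offset -64) and to
# store the existing in_out_ptr0 values back unchanged for all other rows.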
@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16
x2 = xindex
x0 = xindex % 16
tmp101 = tl.load(in_out_ptr0 + x2, xmask)
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 8, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (-64 + x2), tmp5 & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + (-64 + x2), tmp5 & xmask, other=0.0)
tmp8 = x0
tmp9 = tmp8 >= tmp1
tmp10 = tmp8 < tmp3
tmp11 = tmp9 & tmp10
tmp12 = tmp11 & tmp5
tmp13 = tmp5 & tmp12
tmp14 = tmp11 & tmp13
tmp15 = tmp5 & tmp14
tmp16 = tmp8 < tmp1
tmp17 = tmp16 & tmp15
tmp18 = tl.load(in_out_ptr0 + x2, tmp17 & xmask, other=0.0)
tmp19 = tl.load(in_out_ptr0 + x2, tmp15 & xmask, other=0.0)
tmp20 = tl.where(tmp16, tmp18, tmp19)
tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype)
tmp22 = tl.where(tmp15, tmp20, tmp21)
tmp23 = tl.load(in_out_ptr0 + x2, tmp14 & xmask, other=0.0)
tmp24 = tl.where(tmp5, tmp22, tmp23)
tmp25 = tl.load(in_ptr2 + (60 + x0 + 4 * x1), tmp14 & xmask, other=0.0)
tmp26 = tmp24 + tmp25
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp14, tmp26, tmp27)
tmp29 = tmp5 & tmp13
tmp30 = tmp16 & tmp29
tmp31 = tl.load(in_out_ptr0 + x2, tmp30 & xmask, other=0.0)
tmp32 = tl.load(in_out_ptr0 + x2, tmp29 & xmask, other=0.0)
tmp33 = tl.where(tmp16, tmp31, tmp32)
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp29, tmp33, tmp34)
tmp36 = tl.load(in_out_ptr0 + x2, tmp13 & xmask, other=0.0)
tmp37 = tl.where(tmp5, tmp35, tmp36)
tmp38 = tl.where(tmp11, tmp28, tmp37)
tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
tmp40 = tl.where(tmp13, tmp38, tmp39)
tmp41 = tmp16 & tmp13
tmp42 = tl.load(in_out_ptr0 + x2, tmp41 & xmask, other=0.0)
tmp43 = tl.where(tmp16, tmp42, tmp36)
tmp44 = tl.full(tmp43.shape, 0.0, tmp43.dtype)
tmp45 = tl.where(tmp13, tmp43, tmp44)
tmp46 = tl.load(in_out_ptr0 + x2, tmp12 & xmask, other=0.0)
tmp47 = tl.where(tmp5, tmp45, tmp46)
tmp48 = tl.where(tmp5, tmp40, tmp47)
tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype)
tmp50 = tl.where(tmp12, tmp48, tmp49)
tmp51 = tmp5 & tmp5
tmp52 = tmp11 & tmp51
tmp53 = tmp5 & tmp52
tmp54 = tmp16 & tmp53
tmp55 = tl.load(in_out_ptr0 + x2, tmp54 & xmask, other=0.0)
tmp56 = tl.load(in_out_ptr0 + x2, tmp53 & xmask, other=0.0)
tmp57 = tl.where(tmp16, tmp55, tmp56)
tmp58 = tl.full(tmp57.shape, 0.0, tmp57.dtype)
tmp59 = tl.where(tmp53, tmp57, tmp58)
tmp60 = tl.load(in_out_ptr0 + x2, tmp52 & xmask, other=0.0)
tmp61 = tl.where(tmp5, tmp59, tmp60)
tmp62 = tl.load(in_ptr2 + (60 + x0 + 4 * x1), tmp52 & xmask, other=0.0)
tmp63 = tmp61 + tmp62
tmp64 = tl.full(tmp63.shape, 0.0, tmp63.dtype)
tmp65 = tl.where(tmp52, tmp63, tmp64)
tmp66 = tmp5 & tmp51
tmp67 = tmp16 & tmp66
tmp68 = tl.load(in_out_ptr0 + x2, tmp67 & xmask, other=0.0)
tmp69 = tl.load(in_out_ptr0 + x2, tmp66 & xmask, other=0.0)
tmp70 = tl.where(tmp16, tmp68, tmp69)
tmp71 = tl.full(tmp70.shape, 0.0, tmp70.dtype)
tmp72 = tl.where(tmp66, tmp70, tmp71)
tmp73 = tl.load(in_out_ptr0 + x2, tmp51 & xmask, other=0.0)
tmp74 = tl.where(tmp5, tmp72, tmp73)
tmp75 = tl.where(tmp11, tmp65, tmp74)
tmp76 = tl.full(tmp75.shape, 0.0, tmp75.dtype)
tmp77 = tl.where(tmp51, tmp75, tmp76)
tmp78 = tmp16 & tmp51
tmp79 = tl.load(in_out_ptr0 + x2, tmp78 & xmask, other=0.0)
tmp80 = tl.where(tmp16, tmp79, tmp73)
tmp81 = tl.full(tmp80.shape, 0.0, tmp80.dtype)
tmp82 = tl.where(tmp51, tmp80, tmp81)
tmp83 = tl.load(in_out_ptr0 + x2, tmp5 & xmask, other=0.0)
tmp84 = tl.where(tmp5, tmp82, tmp83)
tmp85 = tl.where(tmp5, tmp77, tmp84)
tmp86 = tl.where(tmp11, tmp50, tmp85)
tmp87 = tl.full(tmp86.shape, 0.0, tmp86.dtype)
tmp88 = tl.where(tmp5, tmp86, tmp87)
tmp89 = tl.load(in_ptr2 + (60 + x0 + 4 * x1), tmp12 & xmask, other=0.0)
tmp90 = tmp47 + tmp89
tmp91 = tl.full(tmp90.shape, 0.0, tmp90.dtype)
tmp92 = tl.where(tmp12, tmp90, tmp91)
tmp93 = tl.where(tmp11, tmp92, tmp84)
tmp94 = tl.full(tmp93.shape, 0.0, tmp93.dtype)
tmp95 = tl.where(tmp5, tmp93, tmp94)
tmp96 = tmp16 & tmp5
tmp97 = tl.load(in_out_ptr0 + x2, tmp96 & xmask, other=0.0)
tmp98 = tl.where(tmp16, tmp97, tmp83)
tmp99 = tl.full(tmp98.shape, 0.0, tmp98.dtype)
tmp100 = tl.where(tmp5, tmp98, tmp99)
tmp102 = tl.where(tmp5, tmp100, tmp101)
tmp103 = tl.where(tmp5, tmp95, tmp102)
tmp104 = tl.where(tmp5, tmp88, tmp103)
tmp105 = tl.where(tmp5, tmp7, tmp104)
tmp106 = tl.where(tmp5, tmp6, tmp105)
tl.store(in_out_ptr0 + x2, tmp106, xmask)
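# Pointwise kernel over the third 4x16 slab of in_ptr0 (offset 128, xnumel = 64).
# Column-range predicates (x0 < 4, x0 >= 12) appear to gate additive in_ptr1
# contributions (offsets 116 and 128 + x0 + 4*x1) on top of the slab values; the
# remaining columns largely pass the slab through.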
@triton.jit
def triton_poi_fused_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp287 = tl.load(in_ptr0 + (128 + x2), xmask)
tmp0 = x0
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = 8 + x1
tmp4 = tl.full([1], 8, tl.int64)
tmp5 = tmp3 >= tmp4
tmp6 = tl.full([1], 12, tl.int64)
tmp7 = tmp3 < tmp6
tmp8 = tmp5 & tmp7
tmp9 = tmp8 & tmp2
tmp10 = tmp2 & tmp9
tmp11 = tmp3 >= tmp1
tmp12 = tmp3 < tmp4
tmp13 = tmp11 & tmp12
tmp14 = tmp13 & tmp10
tmp15 = tmp0 >= tmp6
tmp16 = tmp15 & tmp14
tmp17 = tmp13 & tmp16
tmp18 = tmp15 & tmp17
tmp19 = tl.load(in_ptr0 + (128 + x2), tmp18 & xmask, other=0.0)
tmp20 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp18 & xmask, other=0.0)
tmp21 = tmp19 + tmp20
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp18, tmp21, tmp22)
tmp24 = tl.load(in_ptr0 + (128 + x2), tmp17 & xmask, other=0.0)
tmp25 = tl.where(tmp15, tmp23, tmp24)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp17, tmp25, tmp26)
tmp28 = tl.load(in_ptr0 + (128 + x2), tmp16 & xmask, other=0.0)
tmp29 = tl.where(tmp13, tmp27, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp16, tmp29, tmp30)
tmp32 = tmp13 & tmp14
tmp33 = tmp15 & tmp32
tmp34 = tl.load(in_ptr0 + (128 + x2), tmp33 & xmask, other=0.0)
tmp35 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp33 & xmask, other=0.0)
tmp36 = tmp34 + tmp35
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp33, tmp36, tmp37)
tmp39 = tl.load(in_ptr0 + (128 + x2), tmp32 & xmask, other=0.0)
tmp40 = tl.where(tmp15, tmp38, tmp39)
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp32, tmp40, tmp41)
tmp43 = tl.load(in_ptr0 + (128 + x2), tmp14 & xmask, other=0.0)
tmp44 = tl.where(tmp13, tmp42, tmp43)
tmp45 = tl.where(tmp15, tmp31, tmp44)
tmp46 = tl.full(tmp45.shape, 0.0, tmp45.dtype)
tmp47 = tl.where(tmp14, tmp45, tmp46)
tmp48 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp16 & xmask, other=0.0)
tmp49 = tmp28 + tmp48
tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
tmp51 = tl.where(tmp16, tmp49, tmp50)
tmp52 = tl.where(tmp15, tmp51, tmp43)
tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype)
tmp54 = tl.where(tmp14, tmp52, tmp53)
tmp55 = tl.load(in_ptr0 + (128 + x2), tmp10 & xmask, other=0.0)
tmp56 = tl.where(tmp13, tmp54, tmp55)
tmp57 = tl.where(tmp13, tmp47, tmp56)
tmp58 = tl.load(in_ptr1 + (128 + x0 + 4 * x1), tmp10 & xmask, other=0.0)
tmp59 = tmp57 + tmp58
tmp60 = tl.full(tmp59.shape, 0.0, tmp59.dtype)
tmp61 = tl.where(tmp10, tmp59, tmp60)
tmp62 = tmp13 & tmp9
tmp63 = tmp15 & tmp62
tmp64 = tmp13 & tmp63
tmp65 = tmp15 & tmp64
tmp66 = tl.load(in_ptr0 + (128 + x2), tmp65 & xmask, other=0.0)
tmp67 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp65 & xmask, other=0.0)
tmp68 = tmp66 + tmp67
tmp69 = tl.full(tmp68.shape, 0.0, tmp68.dtype)
tmp70 = tl.where(tmp65, tmp68, tmp69)
tmp71 = tl.load(in_ptr0 + (128 + x2), tmp64 & xmask, other=0.0)
tmp72 = tl.where(tmp15, tmp70, tmp71)
tmp73 = tl.full(tmp72.shape, 0.0, tmp72.dtype)
tmp74 = tl.where(tmp64, tmp72, tmp73)
tmp75 = tl.load(in_ptr0 + (128 + x2), tmp63 & xmask, other=0.0)
tmp76 = tl.where(tmp13, tmp74, tmp75)
tmp77 = tl.full(tmp76.shape, 0.0, tmp76.dtype)
tmp78 = tl.where(tmp63, tmp76, tmp77)
tmp79 = tmp13 & tmp62
tmp80 = tmp15 & tmp79
tmp81 = tl.load(in_ptr0 + (128 + x2), tmp80 & xmask, other=0.0)
tmp82 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp80 & xmask, other=0.0)
tmp83 = tmp81 + tmp82
tmp84 = tl.full(tmp83.shape, 0.0, tmp83.dtype)
tmp85 = tl.where(tmp80, tmp83, tmp84)
tmp86 = tl.load(in_ptr0 + (128 + x2), tmp79 & xmask, other=0.0)
tmp87 = tl.where(tmp15, tmp85, tmp86)
tmp88 = tl.full(tmp87.shape, 0.0, tmp87.dtype)
tmp89 = tl.where(tmp79, tmp87, tmp88)
tmp90 = tl.load(in_ptr0 + (128 + x2), tmp62 & xmask, other=0.0)
tmp91 = tl.where(tmp13, tmp89, tmp90)
tmp92 = tl.where(tmp15, tmp78, tmp91)
tmp93 = tl.full(tmp92.shape, 0.0, tmp92.dtype)
tmp94 = tl.where(tmp62, tmp92, tmp93)
tmp95 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp63 & xmask, other=0.0)
tmp96 = tmp75 + tmp95
tmp97 = tl.full(tmp96.shape, 0.0, tmp96.dtype)
tmp98 = tl.where(tmp63, tmp96, tmp97)
tmp99 = tl.where(tmp15, tmp98, tmp90)
tmp100 = tl.full(tmp99.shape, 0.0, tmp99.dtype)
tmp101 = tl.where(tmp62, tmp99, tmp100)
tmp102 = tl.load(in_ptr0 + (128 + x2), tmp9 & xmask, other=0.0)
tmp103 = tl.where(tmp13, tmp101, tmp102)
tmp104 = tl.where(tmp13, tmp94, tmp103)
tmp105 = tl.where(tmp2, tmp61, tmp104)
tmp106 = tl.full(tmp105.shape, 0.0, tmp105.dtype)
tmp107 = tl.where(tmp9, tmp105, tmp106)
tmp108 = tmp13 & tmp2
tmp109 = tmp15 & tmp108
tmp110 = tmp13 & tmp109
tmp111 = tmp15 & tmp110
tmp112 = tl.load(in_ptr0 + (128 + x2), tmp111 & xmask, other=0.0)
tmp113 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp111 & xmask, other=0.0)
tmp114 = tmp112 + tmp113
tmp115 = tl.full(tmp114.shape, 0.0, tmp114.dtype)
tmp116 = tl.where(tmp111, tmp114, tmp115)
tmp117 = tl.load(in_ptr0 + (128 + x2), tmp110 & xmask, other=0.0)
tmp118 = tl.where(tmp15, tmp116, tmp117)
tmp119 = tl.full(tmp118.shape, 0.0, tmp118.dtype)
tmp120 = tl.where(tmp110, tmp118, tmp119)
tmp121 = tl.load(in_ptr0 + (128 + x2), tmp109 & xmask, other=0.0)
tmp122 = tl.where(tmp13, tmp120, tmp121)
tmp123 = tl.full(tmp122.shape, 0.0, tmp122.dtype)
tmp124 = tl.where(tmp109, tmp122, tmp123)
tmp125 = tmp13 & tmp108
tmp126 = tmp15 & tmp125
tmp127 = tl.load(in_ptr0 + (128 + x2), tmp126 & xmask, other=0.0)
tmp128 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp126 & xmask, other=0.0)
tmp129 = tmp127 + tmp128
tmp130 = tl.full(tmp129.shape, 0.0, tmp129.dtype)
tmp131 = tl.where(tmp126, tmp129, tmp130)
tmp132 = tl.load(in_ptr0 + (128 + x2), tmp125 & xmask, other=0.0)
tmp133 = tl.where(tmp15, tmp131, tmp132)
tmp134 = tl.full(tmp133.shape, 0.0, tmp133.dtype)
tmp135 = tl.where(tmp125, tmp133, tmp134)
tmp136 = tl.load(in_ptr0 + (128 + x2), tmp108 & xmask, other=0.0)
tmp137 = tl.where(tmp13, tmp135, tmp136)
tmp138 = tl.where(tmp15, tmp124, tmp137)
tmp139 = tl.full(tmp138.shape, 0.0, tmp138.dtype)
tmp140 = tl.where(tmp108, tmp138, tmp139)
tmp141 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp109 & xmask, other=0.0)
tmp142 = tmp121 + tmp141
tmp143 = tl.full(tmp142.shape, 0.0, tmp142.dtype)
tmp144 = tl.where(tmp109, tmp142, tmp143)
tmp145 = tl.where(tmp15, tmp144, tmp136)
tmp146 = tl.full(tmp145.shape, 0.0, tmp145.dtype)
tmp147 = tl.where(tmp108, tmp145, tmp146)
tmp148 = tl.load(in_ptr0 + (128 + x2), tmp2 & xmask, other=0.0)
tmp149 = tl.where(tmp13, tmp147, tmp148)
tmp150 = tl.where(tmp13, tmp140, tmp149)
tmp151 = tl.where(tmp8, tmp107, tmp150)
tmp152 = tl.full(tmp151.shape, 0.0, tmp151.dtype)
tmp153 = tl.where(tmp2, tmp151, tmp152)
tmp154 = tmp2 & tmp8
tmp155 = tmp13 & tmp154
tmp156 = tmp15 & tmp155
tmp157 = tmp13 & tmp156
tmp158 = tmp15 & tmp157
tmp159 = tl.load(in_ptr0 + (128 + x2), tmp158 & xmask, other=0.0)
tmp160 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp158 & xmask, other=0.0)
tmp161 = tmp159 + tmp160
tmp162 = tl.full(tmp161.shape, 0.0, tmp161.dtype)
tmp163 = tl.where(tmp158, tmp161, tmp162)
tmp164 = tl.load(in_ptr0 + (128 + x2), tmp157 & xmask, other=0.0)
tmp165 = tl.where(tmp15, tmp163, tmp164)
tmp166 = tl.full(tmp165.shape, 0.0, tmp165.dtype)
tmp167 = tl.where(tmp157, tmp165, tmp166)
tmp168 = tl.load(in_ptr0 + (128 + x2), tmp156 & xmask, other=0.0)
tmp169 = tl.where(tmp13, tmp167, tmp168)
tmp170 = tl.full(tmp169.shape, 0.0, tmp169.dtype)
tmp171 = tl.where(tmp156, tmp169, tmp170)
tmp172 = tmp13 & tmp155
tmp173 = tmp15 & tmp172
tmp174 = tl.load(in_ptr0 + (128 + x2), tmp173 & xmask, other=0.0)
tmp175 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp173 & xmask, other=0.0)
tmp176 = tmp174 + tmp175
tmp177 = tl.full(tmp176.shape, 0.0, tmp176.dtype)
tmp178 = tl.where(tmp173, tmp176, tmp177)
tmp179 = tl.load(in_ptr0 + (128 + x2), tmp172 & xmask, other=0.0)
tmp180 = tl.where(tmp15, tmp178, tmp179)
tmp181 = tl.full(tmp180.shape, 0.0, tmp180.dtype)
tmp182 = tl.where(tmp172, tmp180, tmp181)
tmp183 = tl.load(in_ptr0 + (128 + x2), tmp155 & xmask, other=0.0)
tmp184 = tl.where(tmp13, tmp182, tmp183)
tmp185 = tl.where(tmp15, tmp171, tmp184)
tmp186 = tl.full(tmp185.shape, 0.0, tmp185.dtype)
tmp187 = tl.where(tmp155, tmp185, tmp186)
tmp188 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp156 & xmask, other=0.0)
tmp189 = tmp168 + tmp188
tmp190 = tl.full(tmp189.shape, 0.0, tmp189.dtype)
tmp191 = tl.where(tmp156, tmp189, tmp190)
tmp192 = tl.where(tmp15, tmp191, tmp183)
tmp193 = tl.full(tmp192.shape, 0.0, tmp192.dtype)
tmp194 = tl.where(tmp155, tmp192, tmp193)
tmp195 = tl.load(in_ptr0 + (128 + x2), tmp154 & xmask, other=0.0)
tmp196 = tl.where(tmp13, tmp194, tmp195)
tmp197 = tl.where(tmp13, tmp187, tmp196)
tmp198 = tl.load(in_ptr1 + (128 + x0 + 4 * x1), tmp154 & xmask, other=0.0)
tmp199 = tmp197 + tmp198
tmp200 = tl.full(tmp199.shape, 0.0, tmp199.dtype)
tmp201 = tl.where(tmp154, tmp199, tmp200)
tmp202 = tmp13 & tmp8
tmp203 = tmp15 & tmp202
tmp204 = tmp13 & tmp203
tmp205 = tmp15 & tmp204
tmp206 = tl.load(in_ptr0 + (128 + x2), tmp205 & xmask, other=0.0)
tmp207 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp205 & xmask, other=0.0)
tmp208 = tmp206 + tmp207
tmp209 = tl.full(tmp208.shape, 0.0, tmp208.dtype)
tmp210 = tl.where(tmp205, tmp208, tmp209)
tmp211 = tl.load(in_ptr0 + (128 + x2), tmp204 & xmask, other=0.0)
tmp212 = tl.where(tmp15, tmp210, tmp211)
tmp213 = tl.full(tmp212.shape, 0.0, tmp212.dtype)
tmp214 = tl.where(tmp204, tmp212, tmp213)
tmp215 = tl.load(in_ptr0 + (128 + x2), tmp203 & xmask, other=0.0)
tmp216 = tl.where(tmp13, tmp214, tmp215)
tmp217 = tl.full(tmp216.shape, 0.0, tmp216.dtype)
tmp218 = tl.where(tmp203, tmp216, tmp217)
tmp219 = tmp13 & tmp202
tmp220 = tmp15 & tmp219
tmp221 = tl.load(in_ptr0 + (128 + x2), tmp220 & xmask, other=0.0)
tmp222 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp220 & xmask, other=0.0)
tmp223 = tmp221 + tmp222
tmp224 = tl.full(tmp223.shape, 0.0, tmp223.dtype)
tmp225 = tl.where(tmp220, tmp223, tmp224)
tmp226 = tl.load(in_ptr0 + (128 + x2), tmp219 & xmask, other=0.0)
tmp227 = tl.where(tmp15, tmp225, tmp226)
tmp228 = tl.full(tmp227.shape, 0.0, tmp227.dtype)
tmp229 = tl.where(tmp219, tmp227, tmp228)
tmp230 = tl.load(in_ptr0 + (128 + x2), tmp202 & xmask, other=0.0)
tmp231 = tl.where(tmp13, tmp229, tmp230)
tmp232 = tl.where(tmp15, tmp218, tmp231)
tmp233 = tl.full(tmp232.shape, 0.0, tmp232.dtype)
tmp234 = tl.where(tmp202, tmp232, tmp233)
tmp235 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp203 & xmask, other=0.0)
tmp236 = tmp215 + tmp235
tmp237 = tl.full(tmp236.shape, 0.0, tmp236.dtype)
tmp238 = tl.where(tmp203, tmp236, tmp237)
tmp239 = tl.where(tmp15, tmp238, tmp230)
tmp240 = tl.full(tmp239.shape, 0.0, tmp239.dtype)
tmp241 = tl.where(tmp202, tmp239, tmp240)
tmp242 = tl.load(in_ptr0 + (128 + x2), tmp8 & xmask, other=0.0)
tmp243 = tl.where(tmp13, tmp241, tmp242)
tmp244 = tl.where(tmp13, tmp234, tmp243)
tmp245 = tl.where(tmp2, tmp201, tmp244)
tmp246 = tl.full(tmp245.shape, 0.0, tmp245.dtype)
tmp247 = tl.where(tmp8, tmp245, tmp246)
tmp248 = tmp15 & tmp13
tmp249 = tmp13 & tmp248
tmp250 = tmp15 & tmp249
tmp251 = tl.load(in_ptr0 + (128 + x2), tmp250 & xmask, other=0.0)
tmp252 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp250 & xmask, other=0.0)
tmp253 = tmp251 + tmp252
tmp254 = tl.full(tmp253.shape, 0.0, tmp253.dtype)
tmp255 = tl.where(tmp250, tmp253, tmp254)
tmp256 = tl.load(in_ptr0 + (128 + x2), tmp249 & xmask, other=0.0)
tmp257 = tl.where(tmp15, tmp255, tmp256)
tmp258 = tl.full(tmp257.shape, 0.0, tmp257.dtype)
tmp259 = tl.where(tmp249, tmp257, tmp258)
tmp260 = tl.load(in_ptr0 + (128 + x2), tmp248 & xmask, other=0.0)
tmp261 = tl.where(tmp13, tmp259, tmp260)
tmp262 = tl.full(tmp261.shape, 0.0, tmp261.dtype)
tmp263 = tl.where(tmp248, tmp261, tmp262)
tmp264 = tmp13 & tmp13
tmp265 = tmp15 & tmp264
tmp266 = tl.load(in_ptr0 + (128 + x2), tmp265 & xmask, other=0.0)
tmp267 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp265 & xmask, other=0.0)
tmp268 = tmp266 + tmp267
tmp269 = tl.full(tmp268.shape, 0.0, tmp268.dtype)
tmp270 = tl.where(tmp265, tmp268, tmp269)
tmp271 = tl.load(in_ptr0 + (128 + x2), tmp264 & xmask, other=0.0)
tmp272 = tl.where(tmp15, tmp270, tmp271)
tmp273 = tl.full(tmp272.shape, 0.0, tmp272.dtype)
tmp274 = tl.where(tmp264, tmp272, tmp273)
tmp275 = tl.load(in_ptr0 + (128 + x2), tmp13 & xmask, other=0.0)
tmp276 = tl.where(tmp13, tmp274, tmp275)
tmp277 = tl.where(tmp15, tmp263, tmp276)
tmp278 = tl.full(tmp277.shape, 0.0, tmp277.dtype)
tmp279 = tl.where(tmp13, tmp277, tmp278)
tmp280 = tl.load(in_ptr1 + (116 + x0 + 4 * x1), tmp248 & xmask, other=0.0)
tmp281 = tmp260 + tmp280
tmp282 = tl.full(tmp281.shape, 0.0, tmp281.dtype)
tmp283 = tl.where(tmp248, tmp281, tmp282)
tmp284 = tl.where(tmp15, tmp283, tmp275)
tmp285 = tl.full(tmp284.shape, 0.0, tmp284.dtype)
tmp286 = tl.where(tmp13, tmp284, tmp285)
tmp288 = tl.where(tmp13, tmp286, tmp287)
tmp289 = tl.where(tmp13, tmp279, tmp288)
tmp290 = tl.where(tmp8, tmp247, tmp289)
tmp291 = tl.where(tmp2, tmp153, tmp290)
tl.store(out_ptr0 + x2, tmp291, xmask)
@triton.jit
def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16
x2 = xindex
x0 = xindex % 16
tmp147 = tl.load(in_out_ptr0 + x2, xmask)
tmp0 = x1
tmp1 = tl.full([1], 8, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 12, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (-128 + x2), tmp5 & xmask, other=0.0)
tmp7 = x0
tmp8 = tl.full([1], 4, tl.int64)
tmp9 = tmp7 < tmp8
tmp10 = tmp9 & tmp5
tmp11 = tmp0 >= tmp8
tmp12 = tmp0 < tmp1
tmp13 = tmp11 & tmp12
tmp14 = tmp13 & tmp10
tmp15 = tmp7 >= tmp3
tmp16 = tmp15 & tmp14
tmp17 = tmp13 & tmp16
tmp18 = tmp15 & tmp17
tmp19 = tl.load(in_out_ptr0 + x2, tmp18 & xmask, other=0.0)
tmp20 = tl.load(in_ptr1 + (84 + x0 + 4 * x1), tmp18 & xmask, other=0.0)
tmp21 = tmp19 + tmp20
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp18, tmp21, tmp22)
tmp24 = tl.load(in_out_ptr0 + x2, tmp17 & xmask, other=0.0)
tmp25 = tl.where(tmp15, tmp23, tmp24)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp17, tmp25, tmp26)
tmp28 = tl.load(in_out_ptr0 + x2, tmp16 & xmask, other=0.0)
tmp29 = tl.where(tmp13, tmp27, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp16, tmp29, tmp30)
tmp32 = tmp13 & tmp14
tmp33 = tmp15 & tmp32
tmp34 = tl.load(in_out_ptr0 + x2, tmp33 & xmask, other=0.0)
tmp35 = tl.load(in_ptr1 + (84 + x0 + 4 * x1), tmp33 & xmask, other=0.0)
tmp36 = tmp34 + tmp35
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp33, tmp36, tmp37)
tmp39 = tl.load(in_out_ptr0 + x2, tmp32 & xmask, other=0.0)
tmp40 = tl.where(tmp15, tmp38, tmp39)
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp32, tmp40, tmp41)
tmp43 = tl.load(in_out_ptr0 + x2, tmp14 & xmask, other=0.0)
tmp44 = tl.where(tmp13, tmp42, tmp43)
tmp45 = tl.where(tmp15, tmp31, tmp44)
tmp46 = tl.full(tmp45.shape, 0.0, tmp45.dtype)
tmp47 = tl.where(tmp14, tmp45, tmp46)
tmp48 = tl.load(in_ptr1 + (84 + x0 + 4 * x1), tmp16 & xmask, other=0.0)
tmp49 = tmp28 + tmp48
tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
tmp51 = tl.where(tmp16, tmp49, tmp50)
tmp52 = tl.where(tmp15, tmp51, tmp43)
tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype)
tmp54 = tl.where(tmp14, tmp52, tmp53)
tmp55 = tl.load(in_out_ptr0 + x2, tmp10 & xmask, other=0.0)
tmp56 = tl.where(tmp13, tmp54, tmp55)
tmp57 = tl.where(tmp13, tmp47, tmp56)
tmp58 = tl.load(in_ptr1 + (96 + x0 + 4 * x1), tmp10 & xmask, other=0.0)
tmp59 = tmp57 + tmp58
tmp60 = tl.full(tmp59.shape, 0.0, tmp59.dtype)
tmp61 = tl.where(tmp10, tmp59, tmp60)
tmp62 = tmp13 & tmp5
tmp63 = tmp15 & tmp62
tmp64 = tmp13 & tmp63
tmp65 = tmp15 & tmp64
tmp66 = tl.load(in_out_ptr0 + x2, tmp65 & xmask, other=0.0)
tmp67 = tl.load(in_ptr1 + (84 + x0 + 4 * x1), tmp65 & xmask, other=0.0)
tmp68 = tmp66 + tmp67
tmp69 = tl.full(tmp68.shape, 0.0, tmp68.dtype)
tmp70 = tl.where(tmp65, tmp68, tmp69)
tmp71 = tl.load(in_out_ptr0 + x2, tmp64 & xmask, other=0.0)
tmp72 = tl.where(tmp15, tmp70, tmp71)
tmp73 = tl.full(tmp72.shape, 0.0, tmp72.dtype)
tmp74 = tl.where(tmp64, tmp72, tmp73)
tmp75 = tl.load(in_out_ptr0 + x2, tmp63 & xmask, other=0.0)
tmp76 = tl.where(tmp13, tmp74, tmp75)
tmp77 = tl.full(tmp76.shape, 0.0, tmp76.dtype)
tmp78 = tl.where(tmp63, tmp76, tmp77)
tmp79 = tmp13 & tmp62
tmp80 = tmp15 & tmp79
tmp81 = tl.load(in_out_ptr0 + x2, tmp80 & xmask, other=0.0)
tmp82 = tl.load(in_ptr1 + (84 + x0 + 4 * x1), tmp80 & xmask, other=0.0)
tmp83 = tmp81 + tmp82
tmp84 = tl.full(tmp83.shape, 0.0, tmp83.dtype)
tmp85 = tl.where(tmp80, tmp83, tmp84)
tmp86 = tl.load(in_out_ptr0 + x2, tmp79 & xmask, other=0.0)
tmp87 = tl.where(tmp15, tmp85, tmp86)
tmp88 = tl.full(tmp87.shape, 0.0, tmp87.dtype)
tmp89 = tl.where(tmp79, tmp87, tmp88)
tmp90 = tl.load(in_out_ptr0 + x2, tmp62 & xmask, other=0.0)
tmp91 = tl.where(tmp13, tmp89, tmp90)
tmp92 = tl.where(tmp15, tmp78, tmp91)
tmp93 = tl.full(tmp92.shape, 0.0, tmp92.dtype)
tmp94 = tl.where(tmp62, tmp92, tmp93)
tmp95 = tl.load(in_ptr1 + (84 + x0 + 4 * x1), tmp63 & xmask, other=0.0)
tmp96 = tmp75 + tmp95
tmp97 = tl.full(tmp96.shape, 0.0, tmp96.dtype)
tmp98 = tl.where(tmp63, tmp96, tmp97)
tmp99 = tl.where(tmp15, tmp98, tmp90)
tmp100 = tl.full(tmp99.shape, 0.0, tmp99.dtype)
tmp101 = tl.where(tmp62, tmp99, tmp100)
tmp102 = tl.load(in_out_ptr0 + x2, tmp5 & xmask, other=0.0)
tmp103 = tl.where(tmp13, tmp101, tmp102)
tmp104 = tl.where(tmp13, tmp94, tmp103)
tmp105 = tl.where(tmp9, tmp61, tmp104)
tmp106 = tl.full(tmp105.shape, 0.0, tmp105.dtype)
tmp107 = tl.where(tmp5, tmp105, tmp106)
tmp108 = tmp15 & tmp13
tmp109 = tmp13 & tmp108
tmp110 = tmp15 & tmp109
tmp111 = tl.load(in_out_ptr0 + x2, tmp110 & xmask, other=0.0)
tmp112 = tl.load(in_ptr1 + (84 + x0 + 4 * x1), tmp110 & xmask, other=0.0)
tmp113 = tmp111 + tmp112
tmp114 = tl.full(tmp113.shape, 0.0, tmp113.dtype)
tmp115 = tl.where(tmp110, tmp113, tmp114)
tmp116 = tl.load(in_out_ptr0 + x2, tmp109 & xmask, other=0.0)
tmp117 = tl.where(tmp15, tmp115, tmp116)
tmp118 = tl.full(tmp117.shape, 0.0, tmp117.dtype)
tmp119 = tl.where(tmp109, tmp117, tmp118)
tmp120 = tl.load(in_out_ptr0 + x2, tmp108 & xmask, other=0.0)
tmp121 = tl.where(tmp13, tmp119, tmp120)
tmp122 = tl.full(tmp121.shape, 0.0, tmp121.dtype)
tmp123 = tl.where(tmp108, tmp121, tmp122)
tmp124 = tmp13 & tmp13
tmp125 = tmp15 & tmp124
tmp126 = tl.load(in_out_ptr0 + x2, tmp125 & xmask, other=0.0)
tmp127 = tl.load(in_ptr1 + (84 + x0 + 4 * x1), tmp125 & xmask, other=0.0)
tmp128 = tmp126 + tmp127
tmp129 = tl.full(tmp128.shape, 0.0, tmp128.dtype)
tmp130 = tl.where(tmp125, tmp128, tmp129)
tmp131 = tl.load(in_out_ptr0 + x2, tmp124 & xmask, other=0.0)
tmp132 = tl.where(tmp15, tmp130, tmp131)
tmp133 = tl.full(tmp132.shape, 0.0, tmp132.dtype)
tmp134 = tl.where(tmp124, tmp132, tmp133)
tmp135 = tl.load(in_out_ptr0 + x2, tmp13 & xmask, other=0.0)
tmp136 = tl.where(tmp13, tmp134, tmp135)
tmp137 = tl.where(tmp15, tmp123, tmp136)
tmp138 = tl.full(tmp137.shape, 0.0, tmp137.dtype)
tmp139 = tl.where(tmp13, tmp137, tmp138)
tmp140 = tl.load(in_ptr1 + (84 + x0 + 4 * x1), tmp108 & xmask, other=0.0)
tmp141 = tmp120 + tmp140
tmp142 = tl.full(tmp141.shape, 0.0, tmp141.dtype)
tmp143 = tl.where(tmp108, tmp141, tmp142)
tmp144 = tl.where(tmp15, tmp143, tmp135)
tmp145 = tl.full(tmp144.shape, 0.0, tmp144.dtype)
tmp146 = tl.where(tmp13, tmp144, tmp145)
tmp148 = tl.where(tmp13, tmp146, tmp147)
tmp149 = tl.where(tmp13, tmp139, tmp148)
tmp150 = tl.where(tmp5, tmp107, tmp149)
tmp151 = tl.where(tmp5, tmp6, tmp150)
tmp152 = tmp7 >= tmp1
tmp153 = tmp7 < tmp3
tmp154 = tmp152 & tmp153
tmp155 = tmp154 & tmp5
tmp156 = tmp5 & tmp155
tmp157 = tmp7 >= tmp8
tmp158 = tmp7 < tmp1
tmp159 = tmp157 & tmp158
tmp160 = tmp159 & tmp156
tmp161 = tmp5 & tmp160
tmp162 = tmp159 & tmp161
tmp163 = tl.load(in_ptr1 + (108 + x0 + 4 * x1), tmp162 & xmask, other=0.0)
tmp164 = tmp151 + tmp163
tmp165 = tl.full(tmp164.shape, 0.0, tmp164.dtype)
tmp166 = tl.where(tmp162, tmp164, tmp165)
tmp167 = tl.where(tmp159, tmp166, tmp151)
tmp168 = tl.full(tmp167.shape, 0.0, tmp167.dtype)
tmp169 = tl.where(tmp161, tmp167, tmp168)
tmp170 = tl.where(tmp5, tmp169, tmp151)
tmp171 = tl.full(tmp170.shape, 0.0, tmp170.dtype)
tmp172 = tl.where(tmp160, tmp170, tmp171)
tmp173 = tmp5 & tmp156
tmp174 = tmp159 & tmp173
tmp175 = tl.load(in_ptr1 + (108 + x0 + 4 * x1), tmp174 & xmask, other=0.0)
tmp176 = tmp151 + tmp175
tmp177 = tl.full(tmp176.shape, 0.0, tmp176.dtype)
tmp178 = tl.where(tmp174, tmp176, tmp177)
tmp179 = tl.where(tmp159, tmp178, tmp151)
tmp180 = tl.full(tmp179.shape, 0.0, tmp179.dtype)
tmp181 = tl.where(tmp173, tmp179, tmp180)
tmp182 = tl.where(tmp5, tmp181, tmp151)
tmp183 = tl.where(tmp159, tmp172, tmp182)
tmp184 = tl.full(tmp183.shape, 0.0, tmp183.dtype)
tmp185 = tl.where(tmp156, tmp183, tmp184)
tmp186 = tl.load(in_ptr1 + (108 + x0 + 4 * x1), tmp160 & xmask, other=0.0)
tmp187 = tmp151 + tmp186
tmp188 = tl.full(tmp187.shape, 0.0, tmp187.dtype)
tmp189 = tl.where(tmp160, tmp187, tmp188)
tmp190 = tl.where(tmp159, tmp189, tmp151)
tmp191 = tl.full(tmp190.shape, 0.0, tmp190.dtype)
tmp192 = tl.where(tmp156, tmp190, tmp191)
tmp193 = tl.where(tmp5, tmp192, tmp151)
tmp194 = tl.where(tmp5, tmp185, tmp193)
tmp195 = tl.load(in_ptr1 + (120 + x0 + 4 * x1), tmp155 & xmask, other=0.0)
tmp196 = tmp194 + tmp195
tmp197 = tl.full(tmp196.shape, 0.0, tmp196.dtype)
tmp198 = tl.where(tmp155, tmp196, tmp197)
tmp199 = tmp5 & tmp5
tmp200 = tmp159 & tmp199
tmp201 = tmp5 & tmp200
tmp202 = tmp159 & tmp201
tmp203 = tl.load(in_ptr1 + (108 + x0 + 4 * x1), tmp202 & xmask, other=0.0)
tmp204 = tmp151 + tmp203
tmp205 = tl.full(tmp204.shape, 0.0, tmp204.dtype)
tmp206 = tl.where(tmp202, tmp204, tmp205)
tmp207 = tl.where(tmp159, tmp206, tmp151)
tmp208 = tl.full(tmp207.shape, 0.0, tmp207.dtype)
tmp209 = tl.where(tmp201, tmp207, tmp208)
tmp210 = tl.where(tmp5, tmp209, tmp151)
tmp211 = tl.full(tmp210.shape, 0.0, tmp210.dtype)
tmp212 = tl.where(tmp200, tmp210, tmp211)
tmp213 = tmp5 & tmp199
tmp214 = tmp159 & tmp213
tmp215 = tl.load(in_ptr1 + (108 + x0 + 4 * x1), tmp214 & xmask, other=0.0)
tmp216 = tmp151 + tmp215
tmp217 = tl.full(tmp216.shape, 0.0, tmp216.dtype)
tmp218 = tl.where(tmp214, tmp216, tmp217)
tmp219 = tl.where(tmp159, tmp218, tmp151)
tmp220 = tl.full(tmp219.shape, 0.0, tmp219.dtype)
tmp221 = tl.where(tmp213, tmp219, tmp220)
tmp222 = tl.where(tmp5, tmp221, tmp151)
tmp223 = tl.where(tmp159, tmp212, tmp222)
tmp224 = tl.full(tmp223.shape, 0.0, tmp223.dtype)
tmp225 = tl.where(tmp199, tmp223, tmp224)
tmp226 = tl.load(in_ptr1 + (108 + x0 + 4 * x1), tmp200 & xmask, other=0.0)
tmp227 = tmp151 + tmp226
tmp228 = tl.full(tmp227.shape, 0.0, tmp227.dtype)
tmp229 = tl.where(tmp200, tmp227, tmp228)
tmp230 = tl.where(tmp159, tmp229, tmp151)
tmp231 = tl.full(tmp230.shape, 0.0, tmp230.dtype)
tmp232 = tl.where(tmp199, tmp230, tmp231)
tmp233 = tl.where(tmp5, tmp232, tmp151)
tmp234 = tl.where(tmp5, tmp225, tmp233)
tmp235 = tl.where(tmp154, tmp198, tmp234)
tmp236 = tl.full(tmp235.shape, 0.0, tmp235.dtype)
tmp237 = tl.where(tmp5, tmp235, tmp236)
tmp238 = tmp159 & tmp5
tmp239 = tmp5 & tmp238
tmp240 = tmp159 & tmp239
tmp241 = tl.load(in_ptr1 + (108 + x0 + 4 * x1), tmp240 & xmask, other=0.0)
tmp242 = tmp151 + tmp241
tmp243 = tl.full(tmp242.shape, 0.0, tmp242.dtype)
tmp244 = tl.where(tmp240, tmp242, tmp243)
tmp245 = tl.where(tmp159, tmp244, tmp151)
tmp246 = tl.full(tmp245.shape, 0.0, tmp245.dtype)
tmp247 = tl.where(tmp239, tmp245, tmp246)
tmp248 = tl.where(tmp5, tmp247, tmp151)
tmp249 = tl.full(tmp248.shape, 0.0, tmp248.dtype)
tmp250 = tl.where(tmp238, tmp248, tmp249)
tmp251 = tl.where(tmp159, tmp250, tmp233)
tmp252 = tl.full(tmp251.shape, 0.0, tmp251.dtype)
tmp253 = tl.where(tmp5, tmp251, tmp252)
tmp254 = tl.load(in_ptr1 + (108 + x0 + 4 * x1), tmp238 & xmask, other=0.0)
tmp255 = tmp151 + tmp254
tmp256 = tl.full(tmp255.shape, 0.0, tmp255.dtype)
tmp257 = tl.where(tmp238, tmp255, tmp256)
tmp258 = tl.where(tmp159, tmp257, tmp151)
tmp259 = tl.full(tmp258.shape, 0.0, tmp258.dtype)
tmp260 = tl.where(tmp5, tmp258, tmp259)
tmp261 = tl.where(tmp5, tmp260, tmp151)
tmp262 = tl.where(tmp5, tmp253, tmp261)
tmp263 = tl.where(tmp5, tmp237, tmp262)
tl.store(in_out_ptr0 + x2, tmp263, xmask)
@triton.jit
def triton_poi_fused_add_7(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp198 = tl.load(in_ptr0 + (192 + x2), xmask)
tmp0 = x0
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = 12 + x1
tmp4 = tl.full([1], 8, tl.int64)
tmp5 = tmp3 >= tmp4
tmp6 = tl.full([1], 12, tl.int64)
tmp7 = tmp3 < tmp6
tmp8 = tmp5 & tmp7
tmp9 = tmp8 & tmp2
tmp10 = tmp0 >= tmp6
tmp11 = tmp10 & tmp9
tmp12 = tmp8 & tmp11
tmp13 = tmp10 & tmp12
tmp14 = tmp8 & tmp13
tmp15 = tmp0 >= tmp4
tmp16 = tmp0 < tmp6
tmp17 = tmp15 & tmp16
tmp18 = tmp17 & tmp14
tmp19 = tl.load(in_ptr0 + (192 + x2), tmp18 & xmask, other=0.0)
tmp20 = tl.load(in_ptr0 + (192 + x2), tmp14 & xmask, other=0.0)
tmp21 = tl.where(tmp17, tmp19, tmp20)
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp14, tmp21, tmp22)
tmp24 = tl.load(in_ptr0 + (192 + x2), tmp13 & xmask, other=0.0)
tmp25 = tl.where(tmp8, tmp23, tmp24)
tmp26 = tl.load(in_ptr1 + (180 + x0 + 4 * x1), tmp13 & xmask, other=0.0)
tmp27 = tmp25 + tmp26
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp13, tmp27, tmp28)
tmp30 = tmp8 & tmp12
tmp31 = tmp17 & tmp30
tmp32 = tl.load(in_ptr0 + (192 + x2), tmp31 & xmask, other=0.0)
tmp33 = tl.load(in_ptr0 + (192 + x2), tmp30 & xmask, other=0.0)
tmp34 = tl.where(tmp17, tmp32, tmp33)
tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype)
tmp36 = tl.where(tmp30, tmp34, tmp35)
tmp37 = tl.load(in_ptr0 + (192 + x2), tmp12 & xmask, other=0.0)
tmp38 = tl.where(tmp8, tmp36, tmp37)
tmp39 = tl.where(tmp10, tmp29, tmp38)
tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
tmp41 = tl.where(tmp12, tmp39, tmp40)
tmp42 = tmp17 & tmp12
tmp43 = tl.load(in_ptr0 + (192 + x2), tmp42 & xmask, other=0.0)
tmp44 = tl.where(tmp17, tmp43, tmp37)
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp12, tmp44, tmp45)
tmp47 = tl.load(in_ptr0 + (192 + x2), tmp11 & xmask, other=0.0)
tmp48 = tl.where(tmp8, tmp46, tmp47)
tmp49 = tl.where(tmp8, tmp41, tmp48)
tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
tmp51 = tl.where(tmp11, tmp49, tmp50)
tmp52 = tmp8 & tmp9
tmp53 = tmp10 & tmp52
tmp54 = tmp8 & tmp53
tmp55 = tmp17 & tmp54
tmp56 = tl.load(in_ptr0 + (192 + x2), tmp55 & xmask, other=0.0)
tmp57 = tl.load(in_ptr0 + (192 + x2), tmp54 & xmask, other=0.0)
tmp58 = tl.where(tmp17, tmp56, tmp57)
tmp59 = tl.full(tmp58.shape, 0.0, tmp58.dtype)
tmp60 = tl.where(tmp54, tmp58, tmp59)
tmp61 = tl.load(in_ptr0 + (192 + x2), tmp53 & xmask, other=0.0)
tmp62 = tl.where(tmp8, tmp60, tmp61)
tmp63 = tl.load(in_ptr1 + (180 + x0 + 4 * x1), tmp53 & xmask, other=0.0)
tmp64 = tmp62 + tmp63
tmp65 = tl.full(tmp64.shape, 0.0, tmp64.dtype)
tmp66 = tl.where(tmp53, tmp64, tmp65)
tmp67 = tmp8 & tmp52
tmp68 = tmp17 & tmp67
tmp69 = tl.load(in_ptr0 + (192 + x2), tmp68 & xmask, other=0.0)
tmp70 = tl.load(in_ptr0 + (192 + x2), tmp67 & xmask, other=0.0)
tmp71 = tl.where(tmp17, tmp69, tmp70)
tmp72 = tl.full(tmp71.shape, 0.0, tmp71.dtype)
tmp73 = tl.where(tmp67, tmp71, tmp72)
tmp74 = tl.load(in_ptr0 + (192 + x2), tmp52 & xmask, other=0.0)
tmp75 = tl.where(tmp8, tmp73, tmp74)
tmp76 = tl.where(tmp10, tmp66, tmp75)
tmp77 = tl.full(tmp76.shape, 0.0, tmp76.dtype)
tmp78 = tl.where(tmp52, tmp76, tmp77)
tmp79 = tmp17 & tmp52
tmp80 = tl.load(in_ptr0 + (192 + x2), tmp79 & xmask, other=0.0)
tmp81 = tl.where(tmp17, tmp80, tmp74)
tmp82 = tl.full(tmp81.shape, 0.0, tmp81.dtype)
tmp83 = tl.where(tmp52, tmp81, tmp82)
tmp84 = tl.load(in_ptr0 + (192 + x2), tmp9 & xmask, other=0.0)
tmp85 = tl.where(tmp8, tmp83, tmp84)
tmp86 = tl.where(tmp8, tmp78, tmp85)
tmp87 = tl.where(tmp10, tmp51, tmp86)
tmp88 = tl.full(tmp87.shape, 0.0, tmp87.dtype)
tmp89 = tl.where(tmp9, tmp87, tmp88)
tmp90 = tl.load(in_ptr1 + (180 + x0 + 4 * x1), tmp11 & xmask, other=0.0)
tmp91 = tmp48 + tmp90
tmp92 = tl.full(tmp91.shape, 0.0, tmp91.dtype)
tmp93 = tl.where(tmp11, tmp91, tmp92)
tmp94 = tl.where(tmp10, tmp93, tmp85)
tmp95 = tl.full(tmp94.shape, 0.0, tmp94.dtype)
tmp96 = tl.where(tmp9, tmp94, tmp95)
tmp97 = tmp17 & tmp9
tmp98 = tl.load(in_ptr0 + (192 + x2), tmp97 & xmask, other=0.0)
tmp99 = tl.where(tmp17, tmp98, tmp84)
tmp100 = tl.full(tmp99.shape, 0.0, tmp99.dtype)
tmp101 = tl.where(tmp9, tmp99, tmp100)
tmp102 = tl.load(in_ptr0 + (192 + x2), tmp2 & xmask, other=0.0)
tmp103 = tl.where(tmp8, tmp101, tmp102)
tmp104 = tl.where(tmp8, tmp96, tmp103)
tmp105 = tl.where(tmp8, tmp89, tmp104)
tmp106 = tl.load(in_ptr1 + (192 + x0 + 4 * x1), tmp2 & xmask, other=0.0)
tmp107 = tmp105 + tmp106
tmp108 = tl.full(tmp107.shape, 0.0, tmp107.dtype)
tmp109 = tl.where(tmp2, tmp107, tmp108)
tmp110 = tmp10 & tmp8
tmp111 = tmp8 & tmp110
tmp112 = tmp10 & tmp111
tmp113 = tmp8 & tmp112
tmp114 = tmp17 & tmp113
tmp115 = tl.load(in_ptr0 + (192 + x2), tmp114 & xmask, other=0.0)
tmp116 = tl.load(in_ptr0 + (192 + x2), tmp113 & xmask, other=0.0)
tmp117 = tl.where(tmp17, tmp115, tmp116)
tmp118 = tl.full(tmp117.shape, 0.0, tmp117.dtype)
tmp119 = tl.where(tmp113, tmp117, tmp118)
tmp120 = tl.load(in_ptr0 + (192 + x2), tmp112 & xmask, other=0.0)
tmp121 = tl.where(tmp8, tmp119, tmp120)
tmp122 = tl.load(in_ptr1 + (180 + x0 + 4 * x1), tmp112 & xmask, other=0.0)
tmp123 = tmp121 + tmp122
tmp124 = tl.full(tmp123.shape, 0.0, tmp123.dtype)
tmp125 = tl.where(tmp112, tmp123, tmp124)
tmp126 = tmp8 & tmp111
tmp127 = tmp17 & tmp126
tmp128 = tl.load(in_ptr0 + (192 + x2), tmp127 & xmask, other=0.0)
tmp129 = tl.load(in_ptr0 + (192 + x2), tmp126 & xmask, other=0.0)
tmp130 = tl.where(tmp17, tmp128, tmp129)
tmp131 = tl.full(tmp130.shape, 0.0, tmp130.dtype)
tmp132 = tl.where(tmp126, tmp130, tmp131)
tmp133 = tl.load(in_ptr0 + (192 + x2), tmp111 & xmask, other=0.0)
tmp134 = tl.where(tmp8, tmp132, tmp133)
tmp135 = tl.where(tmp10, tmp125, tmp134)
tmp136 = tl.full(tmp135.shape, 0.0, tmp135.dtype)
tmp137 = tl.where(tmp111, tmp135, tmp136)
tmp138 = tmp17 & tmp111
tmp139 = tl.load(in_ptr0 + (192 + x2), tmp138 & xmask, other=0.0)
tmp140 = tl.where(tmp17, tmp139, tmp133)
tmp141 = tl.full(tmp140.shape, 0.0, tmp140.dtype)
tmp142 = tl.where(tmp111, tmp140, tmp141)
tmp143 = tl.load(in_ptr0 + (192 + x2), tmp110 & xmask, other=0.0)
tmp144 = tl.where(tmp8, tmp142, tmp143)
tmp145 = tl.where(tmp8, tmp137, tmp144)
tmp146 = tl.full(tmp145.shape, 0.0, tmp145.dtype)
tmp147 = tl.where(tmp110, tmp145, tmp146)
tmp148 = tmp8 & tmp8
tmp149 = tmp10 & tmp148
tmp150 = tmp8 & tmp149
tmp151 = tmp17 & tmp150
tmp152 = tl.load(in_ptr0 + (192 + x2), tmp151 & xmask, other=0.0)
tmp153 = tl.load(in_ptr0 + (192 + x2), tmp150 & xmask, other=0.0)
tmp154 = tl.where(tmp17, tmp152, tmp153)
tmp155 = tl.full(tmp154.shape, 0.0, tmp154.dtype)
tmp156 = tl.where(tmp150, tmp154, tmp155)
tmp157 = tl.load(in_ptr0 + (192 + x2), tmp149 & xmask, other=0.0)
tmp158 = tl.where(tmp8, tmp156, tmp157)
tmp159 = tl.load(in_ptr1 + (180 + x0 + 4 * x1), tmp149 & xmask, other=0.0)
tmp160 = tmp158 + tmp159
tmp161 = tl.full(tmp160.shape, 0.0, tmp160.dtype)
tmp162 = tl.where(tmp149, tmp160, tmp161)
tmp163 = tmp8 & tmp148
tmp164 = tmp17 & tmp163
tmp165 = tl.load(in_ptr0 + (192 + x2), tmp164 & xmask, other=0.0)
tmp166 = tl.load(in_ptr0 + (192 + x2), tmp163 & xmask, other=0.0)
tmp167 = tl.where(tmp17, tmp165, tmp166)
tmp168 = tl.full(tmp167.shape, 0.0, tmp167.dtype)
tmp169 = tl.where(tmp163, tmp167, tmp168)
tmp170 = tl.load(in_ptr0 + (192 + x2), tmp148 & xmask, other=0.0)
tmp171 = tl.where(tmp8, tmp169, tmp170)
tmp172 = tl.where(tmp10, tmp162, tmp171)
tmp173 = tl.full(tmp172.shape, 0.0, tmp172.dtype)
tmp174 = tl.where(tmp148, tmp172, tmp173)
tmp175 = tmp17 & tmp148
tmp176 = tl.load(in_ptr0 + (192 + x2), tmp175 & xmask, other=0.0)
tmp177 = tl.where(tmp17, tmp176, tmp170)
tmp178 = tl.full(tmp177.shape, 0.0, tmp177.dtype)
tmp179 = tl.where(tmp148, tmp177, tmp178)
tmp180 = tl.load(in_ptr0 + (192 + x2), tmp8 & xmask, other=0.0)
tmp181 = tl.where(tmp8, tmp179, tmp180)
tmp182 = tl.where(tmp8, tmp174, tmp181)
tmp183 = tl.where(tmp10, tmp147, tmp182)
tmp184 = tl.full(tmp183.shape, 0.0, tmp183.dtype)
tmp185 = tl.where(tmp8, tmp183, tmp184)
tmp186 = tl.load(in_ptr1 + (180 + x0 + 4 * x1), tmp110 & xmask, other=0.0)
tmp187 = tmp144 + tmp186
tmp188 = tl.full(tmp187.shape, 0.0, tmp187.dtype)
tmp189 = tl.where(tmp110, tmp187, tmp188)
tmp190 = tl.where(tmp10, tmp189, tmp181)
tmp191 = tl.full(tmp190.shape, 0.0, tmp190.dtype)
tmp192 = tl.where(tmp8, tmp190, tmp191)
tmp193 = tmp17 & tmp8
tmp194 = tl.load(in_ptr0 + (192 + x2), tmp193 & xmask, other=0.0)
tmp195 = tl.where(tmp17, tmp194, tmp180)
tmp196 = tl.full(tmp195.shape, 0.0, tmp195.dtype)
tmp197 = tl.where(tmp8, tmp195, tmp196)
tmp199 = tl.where(tmp8, tmp197, tmp198)
tmp200 = tl.where(tmp8, tmp192, tmp199)
tmp201 = tl.where(tmp8, tmp185, tmp200)
tmp202 = tl.where(tmp2, tmp109, tmp201)
tmp203 = tmp3 >= tmp6
    tmp204 = tmp203 & tmp2
tmp205 = tl.where(tmp203, tmp202, tmp105)
tmp206 = tl.full(tmp205.shape, 0.0, tmp205.dtype)
tmp207 = tl.where(tmp2, tmp205, tmp206)
tmp208 = tl.where(tmp203, tmp202, tmp201)
tmp209 = tl.where(tmp2, tmp207, tmp208)
tl.store(out_ptr0 + x2, tmp202, xmask)
tl.store(out_ptr1 + x2, tmp209, xmask)
@triton.jit
def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16
x2 = xindex
x0 = xindex % 16
tmp102 = tl.load(in_out_ptr0 + x2, xmask)
tmp0 = x1
tmp1 = tl.full([1], 12, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.load(in_ptr0 + (-192 + x2), tmp2 & xmask, other=0.0)
tmp4 = tl.load(in_ptr1 + (-192 + x2), tmp2 & xmask, other=0.0)
tmp5 = tl.full([1], 8, tl.int64)
tmp6 = tmp0 >= tmp5
tmp7 = tmp0 < tmp1
tmp8 = tmp6 & tmp7
tmp9 = x0
tmp10 = tmp9 >= tmp1
tmp11 = tmp10 & tmp8
tmp12 = tmp8 & tmp11
tmp13 = tmp10 & tmp12
tmp14 = tmp8 & tmp13
tmp15 = tmp9 >= tmp5
tmp16 = tmp9 < tmp1
tmp17 = tmp15 & tmp16
tmp18 = tmp17 & tmp14
tmp19 = tl.load(in_out_ptr0 + x2, tmp18 & xmask, other=0.0)
tmp20 = tl.load(in_out_ptr0 + x2, tmp14 & xmask, other=0.0)
tmp21 = tl.where(tmp17, tmp19, tmp20)
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp14, tmp21, tmp22)
tmp24 = tl.load(in_out_ptr0 + x2, tmp13 & xmask, other=0.0)
tmp25 = tl.where(tmp8, tmp23, tmp24)
tmp26 = tl.load(in_ptr2 + (132 + x0 + 4 * x1), tmp13 & xmask, other=0.0)
tmp27 = tmp25 + tmp26
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp13, tmp27, tmp28)
tmp30 = tmp8 & tmp12
tmp31 = tmp17 & tmp30
tmp32 = tl.load(in_out_ptr0 + x2, tmp31 & xmask, other=0.0)
tmp33 = tl.load(in_out_ptr0 + x2, tmp30 & xmask, other=0.0)
tmp34 = tl.where(tmp17, tmp32, tmp33)
tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype)
tmp36 = tl.where(tmp30, tmp34, tmp35)
tmp37 = tl.load(in_out_ptr0 + x2, tmp12 & xmask, other=0.0)
tmp38 = tl.where(tmp8, tmp36, tmp37)
tmp39 = tl.where(tmp10, tmp29, tmp38)
tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
tmp41 = tl.where(tmp12, tmp39, tmp40)
tmp42 = tmp17 & tmp12
tmp43 = tl.load(in_out_ptr0 + x2, tmp42 & xmask, other=0.0)
tmp44 = tl.where(tmp17, tmp43, tmp37)
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp12, tmp44, tmp45)
tmp47 = tl.load(in_out_ptr0 + x2, tmp11 & xmask, other=0.0)
tmp48 = tl.where(tmp8, tmp46, tmp47)
tmp49 = tl.where(tmp8, tmp41, tmp48)
tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
tmp51 = tl.where(tmp11, tmp49, tmp50)
tmp52 = tmp8 & tmp8
tmp53 = tmp10 & tmp52
tmp54 = tmp8 & tmp53
tmp55 = tmp17 & tmp54
tmp56 = tl.load(in_out_ptr0 + x2, tmp55 & xmask, other=0.0)
tmp57 = tl.load(in_out_ptr0 + x2, tmp54 & xmask, other=0.0)
tmp58 = tl.where(tmp17, tmp56, tmp57)
tmp59 = tl.full(tmp58.shape, 0.0, tmp58.dtype)
tmp60 = tl.where(tmp54, tmp58, tmp59)
tmp61 = tl.load(in_out_ptr0 + x2, tmp53 & xmask, other=0.0)
tmp62 = tl.where(tmp8, tmp60, tmp61)
tmp63 = tl.load(in_ptr2 + (132 + x0 + 4 * x1), tmp53 & xmask, other=0.0)
tmp64 = tmp62 + tmp63
tmp65 = tl.full(tmp64.shape, 0.0, tmp64.dtype)
tmp66 = tl.where(tmp53, tmp64, tmp65)
tmp67 = tmp8 & tmp52
tmp68 = tmp17 & tmp67
tmp69 = tl.load(in_out_ptr0 + x2, tmp68 & xmask, other=0.0)
tmp70 = tl.load(in_out_ptr0 + x2, tmp67 & xmask, other=0.0)
tmp71 = tl.where(tmp17, tmp69, tmp70)
tmp72 = tl.full(tmp71.shape, 0.0, tmp71.dtype)
tmp73 = tl.where(tmp67, tmp71, tmp72)
tmp74 = tl.load(in_out_ptr0 + x2, tmp52 & xmask, other=0.0)
tmp75 = tl.where(tmp8, tmp73, tmp74)
tmp76 = tl.where(tmp10, tmp66, tmp75)
tmp77 = tl.full(tmp76.shape, 0.0, tmp76.dtype)
tmp78 = tl.where(tmp52, tmp76, tmp77)
tmp79 = tmp17 & tmp52
tmp80 = tl.load(in_out_ptr0 + x2, tmp79 & xmask, other=0.0)
tmp81 = tl.where(tmp17, tmp80, tmp74)
tmp82 = tl.full(tmp81.shape, 0.0, tmp81.dtype)
tmp83 = tl.where(tmp52, tmp81, tmp82)
tmp84 = tl.load(in_out_ptr0 + x2, tmp8 & xmask, other=0.0)
tmp85 = tl.where(tmp8, tmp83, tmp84)
tmp86 = tl.where(tmp8, tmp78, tmp85)
tmp87 = tl.where(tmp10, tmp51, tmp86)
tmp88 = tl.full(tmp87.shape, 0.0, tmp87.dtype)
tmp89 = tl.where(tmp8, tmp87, tmp88)
tmp90 = tl.load(in_ptr2 + (132 + x0 + 4 * x1), tmp11 & xmask, other=0.0)
tmp91 = tmp48 + tmp90
tmp92 = tl.full(tmp91.shape, 0.0, tmp91.dtype)
tmp93 = tl.where(tmp11, tmp91, tmp92)
tmp94 = tl.where(tmp10, tmp93, tmp85)
tmp95 = tl.full(tmp94.shape, 0.0, tmp94.dtype)
tmp96 = tl.where(tmp8, tmp94, tmp95)
tmp97 = tmp17 & tmp8
tmp98 = tl.load(in_out_ptr0 + x2, tmp97 & xmask, other=0.0)
tmp99 = tl.where(tmp17, tmp98, tmp84)
tmp100 = tl.full(tmp99.shape, 0.0, tmp99.dtype)
tmp101 = tl.where(tmp8, tmp99, tmp100)
tmp103 = tl.where(tmp8, tmp101, tmp102)
tmp104 = tl.where(tmp8, tmp96, tmp103)
tmp105 = tl.where(tmp8, tmp89, tmp104)
tmp106 = tl.where(tmp2, tmp4, tmp105)
tmp107 = tl.where(tmp2, tmp3, tmp106)
tl.store(in_out_ptr0 + x2, tmp107, xmask)
@triton.jit
def triton_poi_fused_9(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp259 = tl.load(in_ptr0 + (192 + x2), xmask)
tmp0 = x0
tmp1 = tl.full([1], 8, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 12, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = 12 + x1
tmp7 = tmp6 >= tmp3
tmp8 = tmp7 & tmp5
tmp9 = tmp5 & tmp8
tmp10 = tmp7 & tmp9
tmp11 = tl.full([1], 4, tl.int64)
tmp12 = tmp0 >= tmp11
tmp13 = tmp0 < tmp1
tmp14 = tmp12 & tmp13
tmp15 = tmp14 & tmp10
tmp16 = tmp7 & tmp15
tmp17 = tmp14 & tmp16
tmp18 = tl.load(in_ptr0 + (192 + x2), tmp17 & xmask, other=0.0)
tmp19 = tl.load(in_ptr1 + (204 + x0 + 4 * x1), tmp17 & xmask, other=0.0)
tmp20 = tmp18 + tmp19
tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype)
tmp22 = tl.where(tmp17, tmp20, tmp21)
tmp23 = tl.load(in_ptr0 + (192 + x2), tmp16 & xmask, other=0.0)
tmp24 = tl.where(tmp14, tmp22, tmp23)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp16, tmp24, tmp25)
tmp27 = tl.load(in_ptr0 + (192 + x2), tmp15 & xmask, other=0.0)
tmp28 = tl.where(tmp7, tmp26, tmp27)
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp15, tmp28, tmp29)
tmp31 = tmp7 & tmp10
tmp32 = tmp14 & tmp31
tmp33 = tl.load(in_ptr0 + (192 + x2), tmp32 & xmask, other=0.0)
tmp34 = tl.load(in_ptr1 + (204 + x0 + 4 * x1), tmp32 & xmask, other=0.0)
tmp35 = tmp33 + tmp34
tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype)
tmp37 = tl.where(tmp32, tmp35, tmp36)
tmp38 = tl.load(in_ptr0 + (192 + x2), tmp31 & xmask, other=0.0)
tmp39 = tl.where(tmp14, tmp37, tmp38)
tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
tmp41 = tl.where(tmp31, tmp39, tmp40)
tmp42 = tl.load(in_ptr0 + (192 + x2), tmp10 & xmask, other=0.0)
tmp43 = tl.where(tmp7, tmp41, tmp42)
tmp44 = tl.where(tmp14, tmp30, tmp43)
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp10, tmp44, tmp45)
tmp47 = tl.load(in_ptr1 + (204 + x0 + 4 * x1), tmp15 & xmask, other=0.0)
tmp48 = tmp27 + tmp47
tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype)
tmp50 = tl.where(tmp15, tmp48, tmp49)
tmp51 = tl.where(tmp14, tmp50, tmp42)
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp10, tmp51, tmp52)
tmp54 = tl.load(in_ptr0 + (192 + x2), tmp9 & xmask, other=0.0)
tmp55 = tl.where(tmp7, tmp53, tmp54)
tmp56 = tl.where(tmp7, tmp46, tmp55)
tmp57 = tl.load(in_ptr1 + (216 + x0 + 4 * x1), tmp9 & xmask, other=0.0)
tmp58 = tmp56 + tmp57
tmp59 = tl.full(tmp58.shape, 0.0, tmp58.dtype)
tmp60 = tl.where(tmp9, tmp58, tmp59)
tmp61 = tmp7 & tmp8
tmp62 = tmp14 & tmp61
tmp63 = tmp7 & tmp62
tmp64 = tmp14 & tmp63
tmp65 = tl.load(in_ptr0 + (192 + x2), tmp64 & xmask, other=0.0)
tmp66 = tl.load(in_ptr1 + (204 + x0 + 4 * x1), tmp64 & xmask, other=0.0)
tmp67 = tmp65 + tmp66
tmp68 = tl.full(tmp67.shape, 0.0, tmp67.dtype)
tmp69 = tl.where(tmp64, tmp67, tmp68)
tmp70 = tl.load(in_ptr0 + (192 + x2), tmp63 & xmask, other=0.0)
tmp71 = tl.where(tmp14, tmp69, tmp70)
tmp72 = tl.full(tmp71.shape, 0.0, tmp71.dtype)
tmp73 = tl.where(tmp63, tmp71, tmp72)
tmp74 = tl.load(in_ptr0 + (192 + x2), tmp62 & xmask, other=0.0)
tmp75 = tl.where(tmp7, tmp73, tmp74)
tmp76 = tl.full(tmp75.shape, 0.0, tmp75.dtype)
tmp77 = tl.where(tmp62, tmp75, tmp76)
tmp78 = tmp7 & tmp61
tmp79 = tmp14 & tmp78
tmp80 = tl.load(in_ptr0 + (192 + x2), tmp79 & xmask, other=0.0)
tmp81 = tl.load(in_ptr1 + (204 + x0 + 4 * x1), tmp79 & xmask, other=0.0)
tmp82 = tmp80 + tmp81
tmp83 = tl.full(tmp82.shape, 0.0, tmp82.dtype)
tmp84 = tl.where(tmp79, tmp82, tmp83)
tmp85 = tl.load(in_ptr0 + (192 + x2), tmp78 & xmask, other=0.0)
tmp86 = tl.where(tmp14, tmp84, tmp85)
tmp87 = tl.full(tmp86.shape, 0.0, tmp86.dtype)
tmp88 = tl.where(tmp78, tmp86, tmp87)
tmp89 = tl.load(in_ptr0 + (192 + x2), tmp61 & xmask, other=0.0)
tmp90 = tl.where(tmp7, tmp88, tmp89)
tmp91 = tl.where(tmp14, tmp77, tmp90)
tmp92 = tl.full(tmp91.shape, 0.0, tmp91.dtype)
tmp93 = tl.where(tmp61, tmp91, tmp92)
tmp94 = tl.load(in_ptr1 + (204 + x0 + 4 * x1), tmp62 & xmask, other=0.0)
tmp95 = tmp74 + tmp94
tmp96 = tl.full(tmp95.shape, 0.0, tmp95.dtype)
tmp97 = tl.where(tmp62, tmp95, tmp96)
tmp98 = tl.where(tmp14, tmp97, tmp89)
tmp99 = tl.full(tmp98.shape, 0.0, tmp98.dtype)
tmp100 = tl.where(tmp61, tmp98, tmp99)
tmp101 = tl.load(in_ptr0 + (192 + x2), tmp8 & xmask, other=0.0)
tmp102 = tl.where(tmp7, tmp100, tmp101)
tmp103 = tl.where(tmp7, tmp93, tmp102)
tmp104 = tl.where(tmp5, tmp60, tmp103)
tmp105 = tl.full(tmp104.shape, 0.0, tmp104.dtype)
tmp106 = tl.where(tmp8, tmp104, tmp105)
tmp107 = tmp14 & tmp8
tmp108 = tmp7 & tmp107
tmp109 = tmp14 & tmp108
tmp110 = tl.load(in_ptr0 + (192 + x2), tmp109 & xmask, other=0.0)
tmp111 = tl.load(in_ptr1 + (204 + x0 + 4 * x1), tmp109 & xmask, other=0.0)
tmp112 = tmp110 + tmp111
tmp113 = tl.full(tmp112.shape, 0.0, tmp112.dtype)
tmp114 = tl.where(tmp109, tmp112, tmp113)
tmp115 = tl.load(in_ptr0 + (192 + x2), tmp108 & xmask, other=0.0)
tmp116 = tl.where(tmp14, tmp114, tmp115)
tmp117 = tl.full(tmp116.shape, 0.0, tmp116.dtype)
tmp118 = tl.where(tmp108, tmp116, tmp117)
tmp119 = tl.load(in_ptr0 + (192 + x2), tmp107 & xmask, other=0.0)
tmp120 = tl.where(tmp7, tmp118, tmp119)
tmp121 = tl.full(tmp120.shape, 0.0, tmp120.dtype)
tmp122 = tl.where(tmp107, tmp120, tmp121)
tmp123 = tl.where(tmp14, tmp122, tmp102)
tmp124 = tl.full(tmp123.shape, 0.0, tmp123.dtype)
tmp125 = tl.where(tmp8, tmp123, tmp124)
tmp126 = tl.load(in_ptr1 + (204 + x0 + 4 * x1), tmp107 & xmask, other=0.0)
tmp127 = tmp119 + tmp126
tmp128 = tl.full(tmp127.shape, 0.0, tmp127.dtype)
tmp129 = tl.where(tmp107, tmp127, tmp128)
tmp130 = tl.where(tmp14, tmp129, tmp101)
tmp131 = tl.full(tmp130.shape, 0.0, tmp130.dtype)
tmp132 = tl.where(tmp8, tmp130, tmp131)
tmp133 = tl.load(in_ptr0 + (192 + x2), tmp5 & xmask, other=0.0)
tmp134 = tl.where(tmp7, tmp132, tmp133)
tmp135 = tl.where(tmp7, tmp125, tmp134)
tmp136 = tl.where(tmp7, tmp106, tmp135)
tmp137 = tl.full(tmp136.shape, 0.0, tmp136.dtype)
tmp138 = tl.where(tmp5, tmp136, tmp137)
tmp139 = tmp5 & tmp7
tmp140 = tmp7 & tmp139
tmp141 = tmp14 & tmp140
tmp142 = tmp7 & tmp141
tmp143 = tmp14 & tmp142
tmp144 = tl.load(in_ptr0 + (192 + x2), tmp143 & xmask, other=0.0)
tmp145 = tl.load(in_ptr1 + (204 + x0 + 4 * x1), tmp143 & xmask, other=0.0)
tmp146 = tmp144 + tmp145
tmp147 = tl.full(tmp146.shape, 0.0, tmp146.dtype)
tmp148 = tl.where(tmp143, tmp146, tmp147)
tmp149 = tl.load(in_ptr0 + (192 + x2), tmp142 & xmask, other=0.0)
tmp150 = tl.where(tmp14, tmp148, tmp149)
tmp151 = tl.full(tmp150.shape, 0.0, tmp150.dtype)
tmp152 = tl.where(tmp142, tmp150, tmp151)
tmp153 = tl.load(in_ptr0 + (192 + x2), tmp141 & xmask, other=0.0)
tmp154 = tl.where(tmp7, tmp152, tmp153)
tmp155 = tl.full(tmp154.shape, 0.0, tmp154.dtype)
tmp156 = tl.where(tmp141, tmp154, tmp155)
tmp157 = tmp7 & tmp140
tmp158 = tmp14 & tmp157
tmp159 = tl.load(in_ptr0 + (192 + x2), tmp158 & xmask, other=0.0)
tmp160 = tl.load(in_ptr1 + (204 + x0 + 4 * x1), tmp158 & xmask, other=0.0)
tmp161 = tmp159 + tmp160
tmp162 = tl.full(tmp161.shape, 0.0, tmp161.dtype)
tmp163 = tl.where(tmp158, tmp161, tmp162)
tmp164 = tl.load(in_ptr0 + (192 + x2), tmp157 & xmask, other=0.0)
tmp165 = tl.where(tmp14, tmp163, tmp164)
tmp166 = tl.full(tmp165.shape, 0.0, tmp165.dtype)
tmp167 = tl.where(tmp157, tmp165, tmp166)
tmp168 = tl.load(in_ptr0 + (192 + x2), tmp140 & xmask, other=0.0)
tmp169 = tl.where(tmp7, tmp167, tmp168)
tmp170 = tl.where(tmp14, tmp156, tmp169)
tmp171 = tl.full(tmp170.shape, 0.0, tmp170.dtype)
tmp172 = tl.where(tmp140, tmp170, tmp171)
tmp173 = tl.load(in_ptr1 + (204 + x0 + 4 * x1), tmp141 & xmask, other=0.0)
tmp174 = tmp153 + tmp173
tmp175 = tl.full(tmp174.shape, 0.0, tmp174.dtype)
tmp176 = tl.where(tmp141, tmp174, tmp175)
tmp177 = tl.where(tmp14, tmp176, tmp168)
tmp178 = tl.full(tmp177.shape, 0.0, tmp177.dtype)
tmp179 = tl.where(tmp140, tmp177, tmp178)
tmp180 = tl.load(in_ptr0 + (192 + x2), tmp139 & xmask, other=0.0)
tmp181 = tl.where(tmp7, tmp179, tmp180)
tmp182 = tl.where(tmp7, tmp172, tmp181)
tmp183 = tl.load(in_ptr1 + (216 + x0 + 4 * x1), tmp139 & xmask, other=0.0)
tmp184 = tmp182 + tmp183
tmp185 = tl.full(tmp184.shape, 0.0, tmp184.dtype)
tmp186 = tl.where(tmp139, tmp184, tmp185)
tmp187 = tmp7 & tmp7
tmp188 = tmp14 & tmp187
tmp189 = tmp7 & tmp188
tmp190 = tmp14 & tmp189
tmp191 = tl.load(in_ptr0 + (192 + x2), tmp190 & xmask, other=0.0)
tmp192 = tl.load(in_ptr1 + (204 + x0 + 4 * x1), tmp190 & xmask, other=0.0)
tmp193 = tmp191 + tmp192
tmp194 = tl.full(tmp193.shape, 0.0, tmp193.dtype)
tmp195 = tl.where(tmp190, tmp193, tmp194)
tmp196 = tl.load(in_ptr0 + (192 + x2), tmp189 & xmask, other=0.0)
tmp197 = tl.where(tmp14, tmp195, tmp196)
tmp198 = tl.full(tmp197.shape, 0.0, tmp197.dtype)
tmp199 = tl.where(tmp189, tmp197, tmp198)
tmp200 = tl.load(in_ptr0 + (192 + x2), tmp188 & xmask, other=0.0)
tmp201 = tl.where(tmp7, tmp199, tmp200)
tmp202 = tl.full(tmp201.shape, 0.0, tmp201.dtype)
tmp203 = tl.where(tmp188, tmp201, tmp202)
tmp204 = tmp7 & tmp187
tmp205 = tmp14 & tmp204
tmp206 = tl.load(in_ptr0 + (192 + x2), tmp205 & xmask, other=0.0)
tmp207 = tl.load(in_ptr1 + (204 + x0 + 4 * x1), tmp205 & xmask, other=0.0)
tmp208 = tmp206 + tmp207
tmp209 = tl.full(tmp208.shape, 0.0, tmp208.dtype)
tmp210 = tl.where(tmp205, tmp208, tmp209)
tmp211 = tl.load(in_ptr0 + (192 + x2), tmp204 & xmask, other=0.0)
tmp212 = tl.where(tmp14, tmp210, tmp211)
tmp213 = tl.full(tmp212.shape, 0.0, tmp212.dtype)
tmp214 = tl.where(tmp204, tmp212, tmp213)
tmp215 = tl.load(in_ptr0 + (192 + x2), tmp187 & xmask, other=0.0)
tmp216 = tl.where(tmp7, tmp214, tmp215)
tmp217 = tl.where(tmp14, tmp203, tmp216)
tmp218 = tl.full(tmp217.shape, 0.0, tmp217.dtype)
tmp219 = tl.where(tmp187, tmp217, tmp218)
tmp220 = tl.load(in_ptr1 + (204 + x0 + 4 * x1), tmp188 & xmask, other=0.0)
tmp221 = tmp200 + tmp220
tmp222 = tl.full(tmp221.shape, 0.0, tmp221.dtype)
tmp223 = tl.where(tmp188, tmp221, tmp222)
tmp224 = tl.where(tmp14, tmp223, tmp215)
tmp225 = tl.full(tmp224.shape, 0.0, tmp224.dtype)
tmp226 = tl.where(tmp187, tmp224, tmp225)
tmp227 = tl.load(in_ptr0 + (192 + x2), tmp7 & xmask, other=0.0)
tmp228 = tl.where(tmp7, tmp226, tmp227)
tmp229 = tl.where(tmp7, tmp219, tmp228)
tmp230 = tl.where(tmp5, tmp186, tmp229)
tmp231 = tl.full(tmp230.shape, 0.0, tmp230.dtype)
tmp232 = tl.where(tmp7, tmp230, tmp231)
tmp233 = tmp14 & tmp7
tmp234 = tmp7 & tmp233
tmp235 = tmp14 & tmp234
tmp236 = tl.load(in_ptr0 + (192 + x2), tmp235 & xmask, other=0.0)
tmp237 = tl.load(in_ptr1 + (204 + x0 + 4 * x1), tmp235 & xmask, other=0.0)
tmp238 = tmp236 + tmp237
tmp239 = tl.full(tmp238.shape, 0.0, tmp238.dtype)
tmp240 = tl.where(tmp235, tmp238, tmp239)
tmp241 = tl.load(in_ptr0 + (192 + x2), tmp234 & xmask, other=0.0)
tmp242 = tl.where(tmp14, tmp240, tmp241)
tmp243 = tl.full(tmp242.shape, 0.0, tmp242.dtype)
tmp244 = tl.where(tmp234, tmp242, tmp243)
tmp245 = tl.load(in_ptr0 + (192 + x2), tmp233 & xmask, other=0.0)
tmp246 = tl.where(tmp7, tmp244, tmp245)
tmp247 = tl.full(tmp246.shape, 0.0, tmp246.dtype)
tmp248 = tl.where(tmp233, tmp246, tmp247)
tmp249 = tl.where(tmp14, tmp248, tmp228)
tmp250 = tl.full(tmp249.shape, 0.0, tmp249.dtype)
tmp251 = tl.where(tmp7, tmp249, tmp250)
tmp252 = tl.load(in_ptr1 + (204 + x0 + 4 * x1), tmp233 & xmask, other=0.0)
tmp253 = tmp245 + tmp252
tmp254 = tl.full(tmp253.shape, 0.0, tmp253.dtype)
tmp255 = tl.where(tmp233, tmp253, tmp254)
tmp256 = tl.where(tmp14, tmp255, tmp227)
tmp257 = tl.full(tmp256.shape, 0.0, tmp256.dtype)
tmp258 = tl.where(tmp7, tmp256, tmp257)
tmp260 = tl.where(tmp7, tmp258, tmp259)
tmp261 = tl.where(tmp7, tmp251, tmp260)
tmp262 = tl.where(tmp7, tmp232, tmp261)
tmp263 = tl.where(tmp5, tmp138, tmp262)
tl.store(out_ptr0 + x2, tmp263, xmask)
@triton.jit
def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16
x2 = xindex
x0 = xindex % 16
tmp133 = tl.load(in_out_ptr0 + x2, xmask)
tmp0 = x1
tmp1 = tl.full([1], 12, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.load(in_ptr0 + (-192 + x2), tmp2 & xmask, other=0.0)
tmp4 = x0
tmp5 = tl.full([1], 8, tl.int64)
tmp6 = tmp4 >= tmp5
tmp7 = tmp4 < tmp1
tmp8 = tmp6 & tmp7
tmp9 = tmp8 & tmp2
tmp10 = tmp2 & tmp9
tmp11 = tl.full([1], 4, tl.int64)
tmp12 = tmp4 >= tmp11
tmp13 = tmp4 < tmp5
tmp14 = tmp12 & tmp13
tmp15 = tmp14 & tmp10
tmp16 = tmp2 & tmp15
tmp17 = tmp14 & tmp16
tmp18 = tl.load(in_out_ptr0 + x2, tmp17 & xmask, other=0.0)
tmp19 = tl.load(in_ptr1 + (156 + x0 + 4 * x1), tmp17 & xmask, other=0.0)
tmp20 = tmp18 + tmp19
tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype)
tmp22 = tl.where(tmp17, tmp20, tmp21)
tmp23 = tl.load(in_out_ptr0 + x2, tmp16 & xmask, other=0.0)
tmp24 = tl.where(tmp14, tmp22, tmp23)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp16, tmp24, tmp25)
tmp27 = tl.load(in_out_ptr0 + x2, tmp15 & xmask, other=0.0)
tmp28 = tl.where(tmp2, tmp26, tmp27)
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp15, tmp28, tmp29)
tmp31 = tmp2 & tmp10
tmp32 = tmp14 & tmp31
tmp33 = tl.load(in_out_ptr0 + x2, tmp32 & xmask, other=0.0)
tmp34 = tl.load(in_ptr1 + (156 + x0 + 4 * x1), tmp32 & xmask, other=0.0)
tmp35 = tmp33 + tmp34
tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype)
tmp37 = tl.where(tmp32, tmp35, tmp36)
tmp38 = tl.load(in_out_ptr0 + x2, tmp31 & xmask, other=0.0)
tmp39 = tl.where(tmp14, tmp37, tmp38)
tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
tmp41 = tl.where(tmp31, tmp39, tmp40)
tmp42 = tl.load(in_out_ptr0 + x2, tmp10 & xmask, other=0.0)
tmp43 = tl.where(tmp2, tmp41, tmp42)
tmp44 = tl.where(tmp14, tmp30, tmp43)
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp10, tmp44, tmp45)
tmp47 = tl.load(in_ptr1 + (156 + x0 + 4 * x1), tmp15 & xmask, other=0.0)
tmp48 = tmp27 + tmp47
tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype)
tmp50 = tl.where(tmp15, tmp48, tmp49)
tmp51 = tl.where(tmp14, tmp50, tmp42)
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp10, tmp51, tmp52)
tmp54 = tl.load(in_out_ptr0 + x2, tmp9 & xmask, other=0.0)
tmp55 = tl.where(tmp2, tmp53, tmp54)
tmp56 = tl.where(tmp2, tmp46, tmp55)
tmp57 = tl.load(in_ptr1 + (168 + x0 + 4 * x1), tmp9 & xmask, other=0.0)
tmp58 = tmp56 + tmp57
tmp59 = tl.full(tmp58.shape, 0.0, tmp58.dtype)
tmp60 = tl.where(tmp9, tmp58, tmp59)
tmp61 = tmp2 & tmp2
tmp62 = tmp14 & tmp61
tmp63 = tmp2 & tmp62
tmp64 = tmp14 & tmp63
tmp65 = tl.load(in_out_ptr0 + x2, tmp64 & xmask, other=0.0)
tmp66 = tl.load(in_ptr1 + (156 + x0 + 4 * x1), tmp64 & xmask, other=0.0)
tmp67 = tmp65 + tmp66
tmp68 = tl.full(tmp67.shape, 0.0, tmp67.dtype)
tmp69 = tl.where(tmp64, tmp67, tmp68)
tmp70 = tl.load(in_out_ptr0 + x2, tmp63 & xmask, other=0.0)
tmp71 = tl.where(tmp14, tmp69, tmp70)
tmp72 = tl.full(tmp71.shape, 0.0, tmp71.dtype)
tmp73 = tl.where(tmp63, tmp71, tmp72)
tmp74 = tl.load(in_out_ptr0 + x2, tmp62 & xmask, other=0.0)
tmp75 = tl.where(tmp2, tmp73, tmp74)
tmp76 = tl.full(tmp75.shape, 0.0, tmp75.dtype)
tmp77 = tl.where(tmp62, tmp75, tmp76)
tmp78 = tmp2 & tmp61
tmp79 = tmp14 & tmp78
tmp80 = tl.load(in_out_ptr0 + x2, tmp79 & xmask, other=0.0)
tmp81 = tl.load(in_ptr1 + (156 + x0 + 4 * x1), tmp79 & xmask, other=0.0)
tmp82 = tmp80 + tmp81
tmp83 = tl.full(tmp82.shape, 0.0, tmp82.dtype)
tmp84 = tl.where(tmp79, tmp82, tmp83)
tmp85 = tl.load(in_out_ptr0 + x2, tmp78 & xmask, other=0.0)
tmp86 = tl.where(tmp14, tmp84, tmp85)
tmp87 = tl.full(tmp86.shape, 0.0, tmp86.dtype)
tmp88 = tl.where(tmp78, tmp86, tmp87)
tmp89 = tl.load(in_out_ptr0 + x2, tmp61 & xmask, other=0.0)
tmp90 = tl.where(tmp2, tmp88, tmp89)
tmp91 = tl.where(tmp14, tmp77, tmp90)
tmp92 = tl.full(tmp91.shape, 0.0, tmp91.dtype)
tmp93 = tl.where(tmp61, tmp91, tmp92)
tmp94 = tl.load(in_ptr1 + (156 + x0 + 4 * x1), tmp62 & xmask, other=0.0)
tmp95 = tmp74 + tmp94
tmp96 = tl.full(tmp95.shape, 0.0, tmp95.dtype)
tmp97 = tl.where(tmp62, tmp95, tmp96)
tmp98 = tl.where(tmp14, tmp97, tmp89)
tmp99 = tl.full(tmp98.shape, 0.0, tmp98.dtype)
tmp100 = tl.where(tmp61, tmp98, tmp99)
tmp101 = tl.load(in_out_ptr0 + x2, tmp2 & xmask, other=0.0)
tmp102 = tl.where(tmp2, tmp100, tmp101)
tmp103 = tl.where(tmp2, tmp93, tmp102)
tmp104 = tl.where(tmp8, tmp60, tmp103)
tmp105 = tl.full(tmp104.shape, 0.0, tmp104.dtype)
tmp106 = tl.where(tmp2, tmp104, tmp105)
tmp107 = tmp14 & tmp2
tmp108 = tmp2 & tmp107
tmp109 = tmp14 & tmp108
tmp110 = tl.load(in_out_ptr0 + x2, tmp109 & xmask, other=0.0)
tmp111 = tl.load(in_ptr1 + (156 + x0 + 4 * x1), tmp109 & xmask, other=0.0)
tmp112 = tmp110 + tmp111
tmp113 = tl.full(tmp112.shape, 0.0, tmp112.dtype)
tmp114 = tl.where(tmp109, tmp112, tmp113)
tmp115 = tl.load(in_out_ptr0 + x2, tmp108 & xmask, other=0.0)
tmp116 = tl.where(tmp14, tmp114, tmp115)
tmp117 = tl.full(tmp116.shape, 0.0, tmp116.dtype)
tmp118 = tl.where(tmp108, tmp116, tmp117)
tmp119 = tl.load(in_out_ptr0 + x2, tmp107 & xmask, other=0.0)
tmp120 = tl.where(tmp2, tmp118, tmp119)
tmp121 = tl.full(tmp120.shape, 0.0, tmp120.dtype)
tmp122 = tl.where(tmp107, tmp120, tmp121)
tmp123 = tl.where(tmp14, tmp122, tmp102)
tmp124 = tl.full(tmp123.shape, 0.0, tmp123.dtype)
tmp125 = tl.where(tmp2, tmp123, tmp124)
tmp126 = tl.load(in_ptr1 + (156 + x0 + 4 * x1), tmp107 & xmask, other=0.0)
tmp127 = tmp119 + tmp126
tmp128 = tl.full(tmp127.shape, 0.0, tmp127.dtype)
tmp129 = tl.where(tmp107, tmp127, tmp128)
tmp130 = tl.where(tmp14, tmp129, tmp101)
tmp131 = tl.full(tmp130.shape, 0.0, tmp130.dtype)
tmp132 = tl.where(tmp2, tmp130, tmp131)
tmp134 = tl.where(tmp2, tmp132, tmp133)
tmp135 = tl.where(tmp2, tmp125, tmp134)
tmp136 = tl.where(tmp2, tmp106, tmp135)
tmp137 = tl.where(tmp2, tmp3, tmp136)
tmp138 = tmp4 >= tmp1
tmp139 = tmp138 & tmp2
tmp140 = tmp2 & tmp139
tmp141 = tmp138 & tmp140
tmp142 = tl.load(in_ptr1 + (180 + x0 + 4 * x1), tmp141 & xmask, other=0.0)
tmp143 = tmp137 + tmp142
tmp144 = tl.full(tmp143.shape, 0.0, tmp143.dtype)
tmp145 = tl.where(tmp141, tmp143, tmp144)
tmp146 = tl.where(tmp138, tmp145, tmp137)
tmp147 = tl.full(tmp146.shape, 0.0, tmp146.dtype)
tmp148 = tl.where(tmp140, tmp146, tmp147)
tmp149 = tl.where(tmp2, tmp148, tmp137)
tmp150 = tl.full(tmp149.shape, 0.0, tmp149.dtype)
tmp151 = tl.where(tmp139, tmp149, tmp150)
tmp152 = tmp138 & tmp61
tmp153 = tl.load(in_ptr1 + (180 + x0 + 4 * x1), tmp152 & xmask, other=0.0)
tmp154 = tmp137 + tmp153
tmp155 = tl.full(tmp154.shape, 0.0, tmp154.dtype)
tmp156 = tl.where(tmp152, tmp154, tmp155)
tmp157 = tl.where(tmp138, tmp156, tmp137)
tmp158 = tl.full(tmp157.shape, 0.0, tmp157.dtype)
tmp159 = tl.where(tmp61, tmp157, tmp158)
tmp160 = tl.where(tmp2, tmp159, tmp137)
tmp161 = tl.where(tmp138, tmp151, tmp160)
tmp162 = tl.full(tmp161.shape, 0.0, tmp161.dtype)
tmp163 = tl.where(tmp2, tmp161, tmp162)
tmp164 = tl.load(in_ptr1 + (180 + x0 + 4 * x1), tmp139 & xmask, other=0.0)
tmp165 = tmp137 + tmp164
tmp166 = tl.full(tmp165.shape, 0.0, tmp165.dtype)
tmp167 = tl.where(tmp139, tmp165, tmp166)
tmp168 = tl.where(tmp138, tmp167, tmp137)
tmp169 = tl.full(tmp168.shape, 0.0, tmp168.dtype)
tmp170 = tl.where(tmp2, tmp168, tmp169)
tmp171 = tl.where(tmp2, tmp170, tmp137)
tmp172 = tl.where(tmp2, tmp163, tmp171)
tl.store(in_out_ptr0 + x2, tmp172, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
triton_poi_fused_1[grid(64)](buf0, arg0_1, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf0
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
buf3 = buf2
del buf2
triton_poi_fused_add_zeros_2[grid(256)](buf3, buf1, arg0_1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf4 = buf1
del buf1
buf5 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
triton_poi_fused_add_3[grid(64)](buf3, arg0_1, buf4, buf5, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf6 = buf3
del buf3
triton_poi_fused_add_4[grid(256)](buf6, buf5, buf4, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf7 = buf5
del buf5
triton_poi_fused_5[grid(64)](buf6, arg0_1, buf7, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf8 = buf6
del buf6
buf9 = buf8
del buf8
triton_poi_fused_add_6[grid(256)](buf9, buf7, arg0_1, 256, XBLOCK=
128, num_warps=4, num_stages=1)
buf10 = buf7
del buf7
buf11 = buf4
del buf4
triton_poi_fused_add_7[grid(64)](buf9, arg0_1, buf10, buf11, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf12 = buf9
del buf9
triton_poi_fused_add_8[grid(256)](buf12, buf11, buf10, arg0_1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del buf10
buf13 = buf11
del buf11
triton_poi_fused_9[grid(64)](buf12, arg0_1, buf13, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf14 = buf12
del buf12
buf15 = buf14
del buf14
triton_poi_fused_add_10[grid(256)](buf15, buf13, arg0_1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del buf13
return buf15,
class Truncation2DNew(torch.nn.Module):
"""
    A module that merges a coarse grid with a per-cell fine grid: the coarse
    scale lives in dimensions -4, -3 and the finer resolution in dimensions
    -2, -1, and the two are combined into one fine-grained grid with two
    dimensions less.
"""
def __init__(self):
super().__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
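A quick usage sketch of the compiled wrapper (an assumed smoke test, not from the repository): the traced call() expects a single CUDA tensor of the (4, 4, 4, 4) shape it was compiled for and, per the output buffer allocated above, returns a merged (16, 16) grid.
# Hypothetical smoke test for the compiled module; requires a CUDA device.
module = Truncation2DNew().cuda()
coarse_fine = torch.rand(4, 4, 4, 4, device='cuda')  # coarse 4x4 grid of 4x4 fine cells
merged = module(coarse_fine)
print(merged.shape)  # torch.Size([16, 16]), the single fine-grained grid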
| kpoeppel/pytorch_probgraph | Truncation2D | false | 15,972 | ["BSD-3-Clause"] | 47 | b78595ab03bbe92595ad2f6b35f5dd8bf84d6da0 | https://github.com/kpoeppel/pytorch_probgraph/tree/b78595ab03bbe92595ad2f6b35f5dd8bf84d6da0 |
ScaledDotProductAttention
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class ScaledDotProductAttention(nn.Module):
""" Scaled Dot-Product Attention --baseline version"""
def __init__(self, dropout=0.3):
super().__init__()
self.dropout = nn.Dropout(dropout)
def forward(self, q, k, v, mask=None):
attn = torch.matmul(q, k.transpose(2, 3))
if mask is not None:
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(F.softmax(attn, dim=-1))
output = torch.matmul(attn, v)
return output, attn
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
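For reference, a minimal eager-mode sanity check with the shapes from get_inputs(); note that, despite the class name, this baseline forward applies no 1/sqrt(d_k) scaling before the softmax, and dropout is set to 0 here so the check is deterministic.
attn_module = ScaledDotProductAttention(dropout=0.0)
q, k, v = get_inputs()
out, attn = attn_module(q, k, v)
print(out.shape, attn.shape)  # torch.Size([4, 4, 4, 4]) torch.Size([4, 4, 4, 4])
print(torch.allclose(attn.sum(dim=-1), torch.ones(4, 4, 4)))  # attention rows sum to 1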
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
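# Reference for the two kernels above: together they compute a numerically stable
# softmax over the last dimension (size 4) in two passes; kernel 0 subtracts the
# per-row maximum and exponentiates, kernel 1 normalises by the per-row sum.
# A hedged plain-PyTorch equivalent (assumed to match up to floating-point error):
def _softmax_two_pass_reference(x):
    e = torch.exp(x - x.max(dim=-1, keepdim=True).values)  # pass 1: max-shift + exp
    return e / e.sum(dim=-1, keepdim=True)                 # pass 2: normalise
# e.g. torch.allclose(_softmax_two_pass_reference(x), torch.softmax(x, dim=-1))
# holds for x = torch.rand(4, 4, 4, 4).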
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(arg0_1, (16, 4, 4), (16, 1, 4), 0),
out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf3
)
del arg2_1
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf2
class ScaledDotProductAttentionNew(nn.Module):
""" Scaled Dot-Product Attention --baseline version"""
def __init__(self, dropout=0.3):
super().__init__()
self.dropout = nn.Dropout(dropout)
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1]
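A hedged usage sketch for the compiled wrapper: call() is traced for CUDA inputs of the fixed (4, 4, 4, 4) shape, and the lowered graph contains neither the optional mask branch nor an active dropout, so only the plain attention path is exercised.
# Hypothetical smoke test; requires a CUDA device.
compiled_attn = ScaledDotProductAttentionNew().cuda()
q = torch.rand(4, 4, 4, 4, device='cuda')
k = torch.rand(4, 4, 4, 4, device='cuda')
v = torch.rand(4, 4, 4, 4, device='cuda')
out, attn = compiled_attn(q, k, v)
print(out.shape, attn.shape)  # both torch.Size([4, 4, 4, 4])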
| Yottaxx/T-LSTM | ScaledDotProductAttention | false | 18,157 | ["MIT"] | 9 | 92618d8c3ee2418b194a2e1592512548da955b77 | https://github.com/Yottaxx/T-LSTM/tree/92618d8c3ee2418b194a2e1592512548da955b77 |
Model_CIFAR10_CNN
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model_CIFAR10_CNN(nn.Module):
def __init__(self):
super(Model_CIFAR10_CNN, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return F.log_softmax(x, dim=1)
def get_inputs():
return [torch.rand([4, 3, 32, 32])]
def get_init_inputs():
return [[], {}]
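A minimal eager-mode sanity check with the CIFAR-10-shaped batch from get_inputs() (an assumed smoke test, not part of the repository):
model = Model_CIFAR10_CNN()
x, = get_inputs()                   # batch of 4 RGB 32x32 images
log_probs = model(x)
print(log_probs.shape)              # torch.Size([4, 10])
print(log_probs.exp().sum(dim=1))   # ~1.0 per row, since the output is log-softmax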
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 18816
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 784 % 6
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4704
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x3 = xindex // 14
x2 = xindex // 1176
x4 = xindex % 1176
tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x3), xmask, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x3), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x3), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4 + 1184 * x2), tmp6, xmask)
tl.store(out_ptr1 + (x4 + 1280 * x2), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 100 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x1), xmask, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x1), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x1), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x2, tmp15, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 480
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 120
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 336
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 84
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__log_softmax_6(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (6,), (1,))
assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1))
assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (120, 400), (400, 1))
assert_size_stride(primals_7, (120,), (1,))
assert_size_stride(primals_8, (84, 120), (120, 1))
assert_size_stride(primals_9, (84,), (1,))
assert_size_stride(primals_10, (10, 84), (84, 1))
assert_size_stride(primals_11, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(18816)](buf1, primals_2,
18816, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch
.float32)
buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(4704)](buf1, buf2,
buf3, 4704, XBLOCK=256, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(6400)](buf5, primals_5,
6400, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8)
buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32
)
triton_poi_fused_max_pool2d_with_indices_3[grid(1600)](buf5, buf6,
buf7, 1600, XBLOCK=256, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (4, 400), (400, 1), 0),
reinterpret_tensor(primals_6, (400, 120), (1, 400), 0), out=buf8)
buf9 = buf8
del buf8
triton_poi_fused_relu_4[grid(480)](buf9, primals_7, 480, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((4, 84), (84, 1), torch.float32)
extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (120, 84), (1,
120), 0), out=buf10)
buf11 = buf10
del buf10
triton_poi_fused_relu_5[grid(336)](buf11, primals_9, 336, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_9
buf12 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_11, buf11, reinterpret_tensor(
primals_10, (84, 10), (1, 84), 0), alpha=1, beta=1, out=buf12)
del primals_11
buf15 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
triton_per_fused__log_softmax_6[grid(4)](buf12, buf15, 4, 10,
XBLOCK=1, num_warps=2, num_stages=1)
del buf12
return (buf15, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5,
buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf9, buf11,
buf15, primals_10, primals_8, primals_6)
class Model_CIFAR10_CNNNew(nn.Module):
def __init__(self):
super(Model_CIFAR10_CNNNew, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_10 = self.fc3.weight
primals_11 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
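# --- Added equivalence sketch (not part of the original repo): assumes a CUDA
# device, since call() allocates CUDA buffers, and assumes the eager
# Model_CIFAR10_CNN reference class shown earlier is importable in this scope.
# The Triton-backed wrapper should match the eager module once parameters are shared.
def _example_check_cifar10_cnn_equivalence():
    eager = Model_CIFAR10_CNN().cuda().eval()
    compiled = Model_CIFAR10_CNNNew().cuda().eval()
    compiled.load_state_dict(eager.state_dict())
    x = torch.rand(4, 3, 32, 32, device='cuda')
    torch.testing.assert_close(compiled(x), eager(x), rtol=1e-4, atol=1e-4)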
| Pluriscient/learn-to-learn | Model_CIFAR10_CNN | false | 11,801 | ["MIT"] | 0 | 4aa0143522eb90f6439b83ed424d12b434cb344b | https://github.com/Pluriscient/learn-to-learn/tree/4aa0143522eb90f6439b83ed424d12b434cb344b |
FPNHead
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
class FPNHead(nn.Module):
def __init__(self, num_in, num_mid, num_out):
super().__init__()
self.block0 = nn.Conv2d(num_in, num_mid, kernel_size=3, padding=1,
bias=False)
self.block1 = nn.Conv2d(num_mid, num_out, kernel_size=3, padding=1,
bias=False)
def forward(self, x):
x = nn.functional.relu(self.block0(x), inplace=True)
x = nn.functional.relu(self.block1(x), inplace=True)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_in': 4, 'num_mid': 4, 'num_out': 4}]
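# --- Added usage sketch (not part of the original repo): the 3x3 convolutions
# above use padding=1, so spatial dimensions are preserved, and the output is
# non-negative after the in-place ReLUs. The helper name is illustrative only.
def _example_fpn_head():
    init_args, init_kwargs = get_init_inputs()
    head = FPNHead(*init_args, **init_kwargs)   # num_in=num_mid=num_out=4
    x, = get_inputs()                           # (4, 4, 4, 4)
    y = head(x)
    return y.shape, bool((y >= 0).all())        # torch.Size([4, 4, 4, 4]), True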
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](buf1, 256, XBLOCK=256, num_warps
=4, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(256)](buf3, buf4,
256, XBLOCK=256, num_warps=4, num_stages=1)
return buf3, primals_1, primals_2, primals_3, buf1, buf4
class FPNHeadNew(nn.Module):
def __init__(self, num_in, num_mid, num_out):
super().__init__()
self.block0 = nn.Conv2d(num_in, num_mid, kernel_size=3, padding=1,
bias=False)
self.block1 = nn.Conv2d(num_mid, num_out, kernel_size=3, padding=1,
bias=False)
def forward(self, input_0):
primals_1 = self.block0.weight
primals_3 = self.block1.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| lePossum/DeblurGANv2 | FPNHead | false | 10,489 | ["BSD-2-Clause"] | 0 | b02c86de98f98604e2416a3a6121110ede7a2de9 | https://github.com/lePossum/DeblurGANv2/tree/b02c86de98f98604e2416a3a6121110ede7a2de9 |
TAM
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/v3/cv3dgybcc7jv6jepupgpr7hnjijy3kfsyf6alnhse5bot5d22ca4.py
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.sum, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_1 => sum_1
# out_2 => relu
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_3, [0]), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%sum_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_sum_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_sum_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_sum_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_sum_threshold_backward_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 64)
x2 = xindex
tmp0 = tl.full([1], 0, tl.int64)
tmp1 = tmp0 >= tmp0
tmp2 = tl.full([1], 1, tl.int64)
tmp3 = tmp0 < tmp2
tmp4 = (-1) + x1
tmp5 = tmp4 >= tmp0
tmp6 = tmp5 & tmp3
tmp7 = tl.load(in_ptr0 + ((-64) + x2), tmp6 & xmask, other=0.0)
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp3, tmp7, tmp8)
tmp10 = tmp0 >= tmp2
tmp11 = tl.full([1], 2, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tl.load(in_out_ptr0 + (x2), tmp13 & xmask, other=0.0)
tmp15 = tmp0 >= tmp11
tmp16 = tl.full([1], 3, tl.int64)
tmp17 = tmp0 < tmp16
tmp18 = 1 + x1
tmp19 = tl.full([1], 4, tl.int64)
tmp20 = tmp18 < tmp19
tmp21 = tmp20 & tmp15
tmp22 = tl.load(in_ptr1 + (64 + x2), tmp21 & xmask, other=0.0)
tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype)
tmp24 = tl.where(tmp15, tmp22, tmp23)
tmp25 = tl.where(tmp13, tmp14, tmp24)
tmp26 = tl.where(tmp3, tmp9, tmp25)
tmp27 = tmp2 >= tmp0
tmp28 = tmp2 < tmp2
tmp29 = tmp5 & tmp28
tmp30 = tl.load(in_ptr0 + ((-64) + x2), tmp29 & xmask, other=0.0)
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp28, tmp30, tmp31)
tmp33 = tmp2 >= tmp2
tmp34 = tmp2 < tmp11
tmp35 = tmp33 & tmp34
tmp36 = tl.load(in_out_ptr0 + (x2), tmp35 & xmask, other=0.0)
tmp37 = tmp2 >= tmp11
tmp38 = tmp2 < tmp16
tmp39 = tmp20 & tmp37
tmp40 = tl.load(in_ptr1 + (64 + x2), tmp39 & xmask, other=0.0)
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp37, tmp40, tmp41)
tmp43 = tl.where(tmp35, tmp36, tmp42)
tmp44 = tl.where(tmp28, tmp32, tmp43)
tmp45 = tmp26 + tmp44
tmp46 = tmp11 >= tmp0
tmp47 = tmp11 < tmp2
tmp48 = tmp5 & tmp47
tmp49 = tl.load(in_ptr0 + ((-64) + x2), tmp48 & xmask, other=0.0)
tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
tmp51 = tl.where(tmp47, tmp49, tmp50)
tmp52 = tmp11 >= tmp2
tmp53 = tmp11 < tmp11
tmp54 = tmp52 & tmp53
tmp55 = tl.load(in_out_ptr0 + (x2), tmp54 & xmask, other=0.0)
tmp56 = tmp11 >= tmp11
tmp57 = tmp11 < tmp16
tmp58 = tmp20 & tmp56
tmp59 = tl.load(in_ptr1 + (64 + x2), tmp58 & xmask, other=0.0)
tmp60 = tl.full(tmp59.shape, 0.0, tmp59.dtype)
tmp61 = tl.where(tmp56, tmp59, tmp60)
tmp62 = tl.where(tmp54, tmp55, tmp61)
tmp63 = tl.where(tmp47, tmp51, tmp62)
tmp64 = tmp45 + tmp63
tmp65 = tl.full([1], 0, tl.int32)
tmp66 = triton_helpers.maximum(tmp65, tmp64)
tmp67 = 0.0
tmp68 = tmp66 <= tmp67
tl.store(out_ptr0 + (x2), tmp68, xmask)
tl.store(out_ptr1 + (x2), tmp66, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_4, (4, 1, 1, 1), (1, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(primals_2, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = reinterpret_tensor(buf1, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0); del buf1 # reuse
buf4 = empty_strided_cuda((1, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.bool)
buf5 = empty_strided_cuda((1, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.sum, aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_sum_threshold_backward_0.run(buf3, buf0, buf2, buf4, buf5, 256, grid=grid(256), stream=stream0)
del buf0
del buf2
del buf3
return (reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_1, primals_2, primals_3, primals_4, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_sum_threshold_backward_0(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 64
x2 = xindex
tmp0 = tl.full([1], 0, tl.int64)
tmp2 = tl.full([1], 1, tl.int64)
tmp3 = tmp0 < tmp2
tmp4 = -1 + x1
tmp5 = tmp4 >= tmp0
tmp6 = tmp5 & tmp3
tmp7 = tl.load(in_ptr0 + (-64 + x2), tmp6 & xmask, other=0.0)
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp3, tmp7, tmp8)
tmp10 = tmp0 >= tmp2
tmp11 = tl.full([1], 2, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tl.load(in_out_ptr0 + x2, tmp13 & xmask, other=0.0)
tmp15 = tmp0 >= tmp11
tl.full([1], 3, tl.int64)
tmp18 = 1 + x1
tmp19 = tl.full([1], 4, tl.int64)
tmp20 = tmp18 < tmp19
tmp21 = tmp20 & tmp15
tmp22 = tl.load(in_ptr1 + (64 + x2), tmp21 & xmask, other=0.0)
tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype)
tmp24 = tl.where(tmp15, tmp22, tmp23)
tmp25 = tl.where(tmp13, tmp14, tmp24)
tmp26 = tl.where(tmp3, tmp9, tmp25)
tmp28 = tmp2 < tmp2
tmp29 = tmp5 & tmp28
tmp30 = tl.load(in_ptr0 + (-64 + x2), tmp29 & xmask, other=0.0)
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp28, tmp30, tmp31)
tmp33 = tmp2 >= tmp2
tmp34 = tmp2 < tmp11
tmp35 = tmp33 & tmp34
tmp36 = tl.load(in_out_ptr0 + x2, tmp35 & xmask, other=0.0)
tmp37 = tmp2 >= tmp11
tmp39 = tmp20 & tmp37
tmp40 = tl.load(in_ptr1 + (64 + x2), tmp39 & xmask, other=0.0)
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp37, tmp40, tmp41)
tmp43 = tl.where(tmp35, tmp36, tmp42)
tmp44 = tl.where(tmp28, tmp32, tmp43)
tmp45 = tmp26 + tmp44
tmp47 = tmp11 < tmp2
tmp48 = tmp5 & tmp47
tmp49 = tl.load(in_ptr0 + (-64 + x2), tmp48 & xmask, other=0.0)
tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
tmp51 = tl.where(tmp47, tmp49, tmp50)
tmp52 = tmp11 >= tmp2
tmp53 = tmp11 < tmp11
tmp54 = tmp52 & tmp53
tmp55 = tl.load(in_out_ptr0 + x2, tmp54 & xmask, other=0.0)
tmp56 = tmp11 >= tmp11
tmp58 = tmp20 & tmp56
tmp59 = tl.load(in_ptr1 + (64 + x2), tmp58 & xmask, other=0.0)
tmp60 = tl.full(tmp59.shape, 0.0, tmp59.dtype)
tmp61 = tl.where(tmp56, tmp59, tmp60)
tmp62 = tl.where(tmp54, tmp55, tmp61)
tmp63 = tl.where(tmp47, tmp51, tmp62)
tmp64 = tmp45 + tmp63
tmp65 = tl.full([1], 0, tl.int32)
tmp66 = triton_helpers.maximum(tmp65, tmp64)
tmp67 = 0.0
tmp68 = tmp66 <= tmp67
tl.store(out_ptr0 + x2, tmp68, xmask)
tl.store(out_ptr1 + x2, tmp66, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_4, (4, 1, 1, 1), (1, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = extern_kernels.convolution(primals_2, primals_3, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = extern_kernels.convolution(primals_2, primals_4, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = reinterpret_tensor(buf1, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0
)
del buf1
buf4 = empty_strided_cuda((1, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.bool)
buf5 = empty_strided_cuda((1, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_relu_sum_threshold_backward_0[grid(256)](buf3,
buf0, buf2, buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del buf2
del buf3
return reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_1, primals_2, primals_3, primals_4, buf4
class SEModule(nn.Module):
def __init__(self, channels, dw_conv):
super().__init__()
ks = 1
pad = (ks - 1) // 2
self.fc1 = nn.Conv2d(channels, channels, kernel_size=ks, padding=
pad, groups=channels if dw_conv else 1, bias=False)
def forward(self, x):
x = self.fc1(x)
return x
class TAMNew(nn.Module):
def __init__(self, duration, channels, dw_conv=True, blending_frames=3,
blending_method='sum'):
super().__init__()
self.blending_frames = blending_frames
self.blending_method = blending_method
if blending_frames == 3:
self.prev_se = SEModule(channels, dw_conv)
self.next_se = SEModule(channels, dw_conv)
self.curr_se = SEModule(channels, dw_conv)
else:
self.blending_layers = nn.ModuleList([SEModule(channels,
dw_conv) for _ in range(blending_frames)])
self.relu = nn.ReLU(inplace=True)
self.duration = duration
def name(self):
return 'TAM-b{}-{}'.format(self.blending_frames, self.blending_method)
def forward(self, input_0):
primals_1 = self.prev_se.fc1.weight
primals_3 = self.next_se.fc1.weight
primals_4 = self.curr_se.fc1.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
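# --- Added usage sketch (not part of the original repo): assumes a CUDA device,
# since call() allocates CUDA buffers. With the default blending_frames=3 the
# wrapper feeds the three depthwise 1x1 SEModule weights to call() above, which
# sums the temporally shifted branches and applies ReLU. Helper name is
# illustrative only.
def _example_tam():
    tam = TAMNew(duration=4, channels=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out = tam(x)          # (4, 4, 4, 4)
    return out.shape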
| ZijiaLewisLu/action-recognition-pytorch | TAM | false | 14,739 | ["Apache-2.0"] | 149 | 6ee04ed249081eb0d8e1b4a3e7a5c11fa65b8d70 | https://github.com/ZijiaLewisLu/action-recognition-pytorch/tree/6ee04ed249081eb0d8e1b4a3e7a5c11fa65b8d70 |
bodypose_model
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/ej/cejfrwnzxinkchwn6symdb72fdtj7gix5hy2vuswodhbeh45mrae.py
# Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# input_1 => convolution
# input_2 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/7z/c7zuih2ysjtir5rh5seep5ijnhokjlgkyjw2edhf257ahvz4iipr.py
# Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# input_5 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/xq/cxqz2dr7nh2qabrtemj52pazmhrknj5ltcy32ka252ia6a3jgpqi.py
# Topologically Sorted Source Nodes: [input_6, input_7], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# input_6 => convolution_2
# input_7 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 128
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/pr/cpri5daxkfbmt5ostbhb5o2avircr64a2rmdkxfackaxyjfc7owe.py
# Topologically Sorted Source Nodes: [input_10], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# input_10 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/of/cof37d5wbqzvtkioj7k4me7wqpvfv55rs62ytonj7gij2o3abnod.py
# Topologically Sorted Source Nodes: [input_11, input_12], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# input_11 => convolution_4
# input_12 => relu_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 256
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/mn/cmnzsv2cdbsuq2sygridqvwumzmcvknuthlumel5m25l2ajsr4ft.py
# Topologically Sorted Source Nodes: [input_19], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# input_19 => getitem_4, getitem_5
# Graph fragment:
# %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/ic/cicsjqc3cfcjzqlztx4hz7ssqwe47ngo3g2onc6463k3vgfmt5cw.py
# Topologically Sorted Source Nodes: [input_20, input_21], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# input_20 => convolution_8
# input_21 => relu_8
# Graph fragment:
# %convolution_8 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_18, %primals_19, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_8 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_8,), kwargs = {})
triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 512
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/rs/crsb2j7t6kjc2dizrgavde3h3rerob3nhf7iqux6o24562lkvvoe.py
# Topologically Sorted Source Nodes: [input_24, input_25], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# input_24 => convolution_10
# input_25 => relu_10
# Graph fragment:
# %convolution_10 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_9, %primals_22, %primals_23, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_10 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_10,), kwargs = {})
triton_poi_fused_convolution_relu_7 = async_compile.triton('triton_poi_fused_convolution_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 256
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/qy/cqyis4pzdzl2zcpdenz7kfyw4uxhak4ugnkkhusp7xtxj4qytdez.py
# Topologically Sorted Source Nodes: [input_26, input_27], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# input_26 => convolution_11
# input_27 => relu_11
# Graph fragment:
# %convolution_11 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_10, %primals_24, %primals_25, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_11 : [num_users=8] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_11,), kwargs = {})
triton_poi_fused_convolution_relu_8 = async_compile.triton('triton_poi_fused_convolution_relu_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 128
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/xh/cxh5qnz7467zwx7kksukcyl5yqbimdzz2jusq6gmtz3v7ngsbddj.py
# Topologically Sorted Source Nodes: [out2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# out2 => cat
# Graph fragment:
# %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution_16, %convolution_21, %relu_11], 1), kwargs = {})
triton_poi_fused_cat_9 = async_compile.triton('triton_poi_fused_cat_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 47360
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 64) % 185
x0 = xindex % 64
x2 = (xindex // 11840)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 38, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (64*x1) + (2432*x2)), tmp4 & xmask, other=0.0)
tmp6 = tl.load(in_ptr1 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 57, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tl.load(in_ptr2 + (x0 + (64*((-38) + x1)) + (1216*x2)), tmp13 & xmask, other=0.0)
tmp15 = tl.load(in_ptr3 + ((-38) + x1), tmp13 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp13, tmp16, tmp17)
tmp19 = tmp0 >= tmp11
tmp20 = tl.full([1], 185, tl.int64)
tmp21 = tmp0 < tmp20
tmp22 = tl.load(in_ptr4 + (x0 + (64*((-57) + x1)) + (8192*x2)), tmp19 & xmask, other=0.0)
tmp23 = tl.where(tmp13, tmp18, tmp22)
tmp24 = tl.where(tmp4, tmp9, tmp23)
tl.store(out_ptr0 + (x3), tmp24, xmask)
''', device_str='cuda')
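# Illustrative eager-mode sketch (not part of the generated graph): the cat kernel
# above appears to fuse the per-channel bias adds of the two branch convolutions
# (38 and 19 output channels, dispatched without bias) with the channel-wise
# concatenation onto the shared 128-channel feature map, giving 38 + 19 + 128 = 185
# channels. The helper below is for reference only; its names are hypothetical.
def _reference_stage_concat(branch_a, bias_a, branch_b, bias_b, features):
    # branch_a: (N, 38, H, W), branch_b: (N, 19, H, W), features: (N, 128, H, W)
    return torch.cat([
        branch_a + bias_a.view(1, -1, 1, 1),
        branch_b + bias_b.view(1, -1, 1, 1),
        features,
    ], dim=1)  # -> (N, 185, H, W)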
# kernel path: runs/run_shard_8/inductor_cache/4y/c4y6uyvhmsek266ivpsqvnnoksgofgtz3h3rggm6nksziikdh57s.py
# Topologically Sorted Source Nodes: [input_162], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# input_162 => convolution_84
# Graph fragment:
# %convolution_84 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_73, %primals_170, %primals_171, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_10 = async_compile.triton('triton_poi_fused_convolution_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 9728
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 64) % 38
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
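# Illustrative eager-mode sketch (not part of the generated graph): unlike the fused
# convolution+ReLU kernels, the kernel above only adds the per-channel bias in place
# (no activation), matching the 38-channel head output. Reference only; the name is
# hypothetical.
def _reference_bias_add_(conv_out, bias):
    # conv_out: (N, 38, H, W) convolution output produced with bias=None; bias: (38,)
    return conv_out.add_(bias.view(1, -1, 1, 1))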
# kernel path: runs/run_shard_8/inductor_cache/ce/ccekwplfoihswfouqfjqcwmfd2cg37pkjpmovikpxyaqzec4g3iq.py
# Topologically Sorted Source Nodes: [input_175, input_176], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# input_175 => convolution_91
# input_176 => relu_80
# Graph fragment:
# %convolution_91 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_79, %primals_184, %primals_185, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_80 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_91,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_80, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_11 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_11(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 4864  # 4 (batch) * 19 (channels) * 8 * 8 (spatial)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex
    x1 = (xindex // 64) % 19  # channel index, used to gather the per-channel bias
    x2 = (xindex // 1216)  # batch index (1216 = 19 * 64 elements per sample)
    x3 = xindex % 1216  # offset within one sample of the mask output
    tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
    tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1  # convolution output + bias
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5  # mask of clamped activations, saved for threshold_backward
    tl.store(in_out_ptr0 + (x4), tmp4, xmask)
    tl.store(out_ptr0 + (x3 + (1280*x2)), tmp6, xmask)  # mask buffer is padded to a per-sample stride of 1280
''', device_str='cuda')
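# Illustrative eager-mode sketch (not part of the generated graph): the kernel above
# fuses the bias add and ReLU for the final 19-channel head and additionally stores the
# boolean mask of zeroed activations that aten.threshold_backward needs for the ReLU
# gradient. Reference only; names are hypothetical.
def _reference_bias_relu_with_backward_mask(conv_out, bias):
    # conv_out: (N, 19, H, W) convolution output produced with bias=None; bias: (19,)
    out = torch.relu(conv_out + bias.view(1, -1, 1, 1))
    mask = out <= 0  # kept so the backward pass can zero gradients where ReLU clamped
    return out, mask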
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121, primals_122, primals_123, primals_124, primals_125, primals_126, primals_127, primals_128, primals_129, primals_130, primals_131, primals_132, primals_133, primals_134, primals_135, primals_136, primals_137, primals_138, primals_139, primals_140, primals_141, primals_142, primals_143, primals_144, primals_145, primals_146, primals_147, primals_148, primals_149, primals_150, primals_151, primals_152, primals_153, primals_154, primals_155, primals_156, primals_157, primals_158, primals_159, primals_160, primals_161, primals_162, primals_163, primals_164, primals_165, primals_166, primals_167, primals_168, primals_169, primals_170, primals_171, primals_172, primals_173, primals_174, primals_175, primals_176, primals_177, primals_178, primals_179, primals_180, primals_181, primals_182, primals_183, primals_184, primals_185 = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (128, ), (1, ))
assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_11, (256, ), (1, ))
assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_13, (256, ), (1, ))
assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_15, (256, ), (1, ))
assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_17, (256, ), (1, ))
assert_size_stride(primals_18, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_19, (512, ), (1, ))
assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_21, (512, ), (1, ))
assert_size_stride(primals_22, (256, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_23, (256, ), (1, ))
assert_size_stride(primals_24, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_25, (128, ), (1, ))
assert_size_stride(primals_26, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_27, (128, ), (1, ))
assert_size_stride(primals_28, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_29, (128, ), (1, ))
assert_size_stride(primals_30, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_31, (128, ), (1, ))
assert_size_stride(primals_32, (512, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_33, (512, ), (1, ))
assert_size_stride(primals_34, (38, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_35, (38, ), (1, ))
assert_size_stride(primals_36, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_37, (128, ), (1, ))
assert_size_stride(primals_38, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_39, (128, ), (1, ))
assert_size_stride(primals_40, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_41, (128, ), (1, ))
assert_size_stride(primals_42, (512, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_43, (512, ), (1, ))
assert_size_stride(primals_44, (19, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_45, (19, ), (1, ))
assert_size_stride(primals_46, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_47, (128, ), (1, ))
assert_size_stride(primals_48, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_49, (128, ), (1, ))
assert_size_stride(primals_50, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_51, (128, ), (1, ))
assert_size_stride(primals_52, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_53, (128, ), (1, ))
assert_size_stride(primals_54, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_55, (128, ), (1, ))
assert_size_stride(primals_56, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_57, (128, ), (1, ))
assert_size_stride(primals_58, (38, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_59, (38, ), (1, ))
assert_size_stride(primals_60, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_61, (128, ), (1, ))
assert_size_stride(primals_62, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_63, (128, ), (1, ))
assert_size_stride(primals_64, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_65, (128, ), (1, ))
assert_size_stride(primals_66, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_67, (128, ), (1, ))
assert_size_stride(primals_68, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_69, (128, ), (1, ))
assert_size_stride(primals_70, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_71, (128, ), (1, ))
assert_size_stride(primals_72, (19, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_73, (19, ), (1, ))
assert_size_stride(primals_74, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_75, (128, ), (1, ))
assert_size_stride(primals_76, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_77, (128, ), (1, ))
assert_size_stride(primals_78, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_79, (128, ), (1, ))
assert_size_stride(primals_80, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_81, (128, ), (1, ))
assert_size_stride(primals_82, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_83, (128, ), (1, ))
assert_size_stride(primals_84, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_85, (128, ), (1, ))
assert_size_stride(primals_86, (38, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_87, (38, ), (1, ))
assert_size_stride(primals_88, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_89, (128, ), (1, ))
assert_size_stride(primals_90, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_91, (128, ), (1, ))
assert_size_stride(primals_92, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_93, (128, ), (1, ))
assert_size_stride(primals_94, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_95, (128, ), (1, ))
assert_size_stride(primals_96, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_97, (128, ), (1, ))
assert_size_stride(primals_98, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_99, (128, ), (1, ))
assert_size_stride(primals_100, (19, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_101, (19, ), (1, ))
assert_size_stride(primals_102, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_103, (128, ), (1, ))
assert_size_stride(primals_104, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_105, (128, ), (1, ))
assert_size_stride(primals_106, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_107, (128, ), (1, ))
assert_size_stride(primals_108, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_109, (128, ), (1, ))
assert_size_stride(primals_110, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_111, (128, ), (1, ))
assert_size_stride(primals_112, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_113, (128, ), (1, ))
assert_size_stride(primals_114, (38, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_115, (38, ), (1, ))
assert_size_stride(primals_116, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_117, (128, ), (1, ))
assert_size_stride(primals_118, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_119, (128, ), (1, ))
assert_size_stride(primals_120, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_121, (128, ), (1, ))
assert_size_stride(primals_122, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_123, (128, ), (1, ))
assert_size_stride(primals_124, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_125, (128, ), (1, ))
assert_size_stride(primals_126, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_127, (128, ), (1, ))
assert_size_stride(primals_128, (19, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_129, (19, ), (1, ))
assert_size_stride(primals_130, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_131, (128, ), (1, ))
assert_size_stride(primals_132, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_133, (128, ), (1, ))
assert_size_stride(primals_134, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_135, (128, ), (1, ))
assert_size_stride(primals_136, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_137, (128, ), (1, ))
assert_size_stride(primals_138, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_139, (128, ), (1, ))
assert_size_stride(primals_140, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_141, (128, ), (1, ))
assert_size_stride(primals_142, (38, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_143, (38, ), (1, ))
assert_size_stride(primals_144, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_145, (128, ), (1, ))
assert_size_stride(primals_146, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_147, (128, ), (1, ))
assert_size_stride(primals_148, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_149, (128, ), (1, ))
assert_size_stride(primals_150, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_151, (128, ), (1, ))
assert_size_stride(primals_152, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_153, (128, ), (1, ))
assert_size_stride(primals_154, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_155, (128, ), (1, ))
assert_size_stride(primals_156, (19, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_157, (19, ), (1, ))
assert_size_stride(primals_158, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_159, (128, ), (1, ))
assert_size_stride(primals_160, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_161, (128, ), (1, ))
assert_size_stride(primals_162, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_163, (128, ), (1, ))
assert_size_stride(primals_164, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_165, (128, ), (1, ))
assert_size_stride(primals_166, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_167, (128, ), (1, ))
assert_size_stride(primals_168, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_169, (128, ), (1, ))
assert_size_stride(primals_170, (38, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_171, (38, ), (1, ))
assert_size_stride(primals_172, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_173, (128, ), (1, ))
assert_size_stride(primals_174, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_175, (128, ), (1, ))
assert_size_stride(primals_176, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_177, (128, ), (1, ))
assert_size_stride(primals_178, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_179, (128, ), (1, ))
assert_size_stride(primals_180, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_181, (128, ), (1, ))
assert_size_stride(primals_182, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_183, (128, ), (1, ))
assert_size_stride(primals_184, (19, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_185, (19, ), (1, ))
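    # The asserted parameter shapes above fall into two groups (inferred from the shapes
    # only): primals_1..primals_45 are 3x3 backbone convolutions plus the first pair of
    # 1x1 branch heads (38 and 19 output channels), while primals_46..primals_185 repeat
    # one refinement-stage layout per stage: five 7x7 convolutions (the first taking the
    # 185-channel concatenation), a 1x1 projection, and a 1x1 head whose output channels
    # alternate between 38 and 19.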
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 1048576, grid=grid(1048576), stream=stream0)
del primals_2
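        # Pattern used throughout this function: the convolution itself is dispatched to
        # an external aten.convolution call (typically cuDNN on CUDA) with bias=None, and
        # the per-channel bias add plus ReLU is applied afterwards by a fused pointwise
        # Triton kernel that rewrites the convolution output buffer in place.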
# Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [input_3, input_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_0.run(buf3, primals_5, 1048576, grid=grid(1048576), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32)
buf5 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.int8)
# Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf3, buf4, buf5, 262144, grid=grid(262144), stream=stream0)
# Topologically Sorted Source Nodes: [input_6], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 128, 32, 32), (131072, 1024, 32, 1))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [input_6, input_7], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf7, primals_7, 524288, grid=grid(524288), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [input_8], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 128, 32, 32), (131072, 1024, 32, 1))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [input_8, input_9], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf9, primals_9, 524288, grid=grid(524288), stream=stream0)
del primals_9
buf10 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32)
buf11 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.int8)
# Topologically Sorted Source Nodes: [input_10], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf9, buf10, buf11, 131072, grid=grid(131072), stream=stream0)
# Topologically Sorted Source Nodes: [input_11], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 256, 16, 16), (65536, 256, 16, 1))
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [input_11, input_12], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf13, primals_11, 262144, grid=grid(262144), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [input_13], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 256, 16, 16), (65536, 256, 16, 1))
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [input_13, input_14], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf15, primals_13, 262144, grid=grid(262144), stream=stream0)
del primals_13
# Topologically Sorted Source Nodes: [input_15], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 256, 16, 16), (65536, 256, 16, 1))
buf17 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [input_15, input_16], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf17, primals_15, 262144, grid=grid(262144), stream=stream0)
del primals_15
# Topologically Sorted Source Nodes: [input_17], Original ATen: [aten.convolution]
buf18 = extern_kernels.convolution(buf17, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 256, 16, 16), (65536, 256, 16, 1))
buf19 = buf18; del buf18 # reuse
# Topologically Sorted Source Nodes: [input_17, input_18], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf19, primals_17, 262144, grid=grid(262144), stream=stream0)
del primals_17
buf20 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.float32)
buf21 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.int8)
# Topologically Sorted Source Nodes: [input_19], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_5.run(buf19, buf20, buf21, 65536, grid=grid(65536), stream=stream0)
# Topologically Sorted Source Nodes: [input_20], Original ATen: [aten.convolution]
buf22 = extern_kernels.convolution(buf20, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 512, 8, 8), (32768, 64, 8, 1))
buf23 = buf22; del buf22 # reuse
# Topologically Sorted Source Nodes: [input_20, input_21], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf23, primals_19, 131072, grid=grid(131072), stream=stream0)
del primals_19
# Topologically Sorted Source Nodes: [input_22], Original ATen: [aten.convolution]
buf24 = extern_kernels.convolution(buf23, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 512, 8, 8), (32768, 64, 8, 1))
buf25 = buf24; del buf24 # reuse
# Topologically Sorted Source Nodes: [input_22, input_23], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf25, primals_21, 131072, grid=grid(131072), stream=stream0)
del primals_21
# Topologically Sorted Source Nodes: [input_24], Original ATen: [aten.convolution]
buf26 = extern_kernels.convolution(buf25, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 256, 8, 8), (16384, 64, 8, 1))
buf27 = buf26; del buf26 # reuse
# Topologically Sorted Source Nodes: [input_24, input_25], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_7.run(buf27, primals_23, 65536, grid=grid(65536), stream=stream0)
del primals_23
# Topologically Sorted Source Nodes: [input_26], Original ATen: [aten.convolution]
buf28 = extern_kernels.convolution(buf27, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 128, 8, 8), (8192, 64, 8, 1))
buf29 = buf28; del buf28 # reuse
# Topologically Sorted Source Nodes: [input_26, input_27], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf29, primals_25, 32768, grid=grid(32768), stream=stream0)
del primals_25
# Topologically Sorted Source Nodes: [input_28], Original ATen: [aten.convolution]
buf30 = extern_kernels.convolution(buf29, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 128, 8, 8), (8192, 64, 8, 1))
buf31 = buf30; del buf30 # reuse
# Topologically Sorted Source Nodes: [input_28, input_29], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf31, primals_27, 32768, grid=grid(32768), stream=stream0)
del primals_27
# Topologically Sorted Source Nodes: [input_30], Original ATen: [aten.convolution]
buf32 = extern_kernels.convolution(buf31, primals_28, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 128, 8, 8), (8192, 64, 8, 1))
buf33 = buf32; del buf32 # reuse
# Topologically Sorted Source Nodes: [input_30, input_31], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf33, primals_29, 32768, grid=grid(32768), stream=stream0)
del primals_29
# Topologically Sorted Source Nodes: [input_32], Original ATen: [aten.convolution]
buf34 = extern_kernels.convolution(buf33, primals_30, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf34, (4, 128, 8, 8), (8192, 64, 8, 1))
buf35 = buf34; del buf34 # reuse
# Topologically Sorted Source Nodes: [input_32, input_33], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf35, primals_31, 32768, grid=grid(32768), stream=stream0)
del primals_31
# Topologically Sorted Source Nodes: [input_34], Original ATen: [aten.convolution]
buf36 = extern_kernels.convolution(buf35, primals_32, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 512, 8, 8), (32768, 64, 8, 1))
buf37 = buf36; del buf36 # reuse
# Topologically Sorted Source Nodes: [input_34, input_35], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf37, primals_33, 131072, grid=grid(131072), stream=stream0)
del primals_33
# Topologically Sorted Source Nodes: [input_36], Original ATen: [aten.convolution]
buf38 = extern_kernels.convolution(buf37, primals_34, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 38, 8, 8), (2432, 64, 8, 1))
# Topologically Sorted Source Nodes: [input_37], Original ATen: [aten.convolution]
buf39 = extern_kernels.convolution(buf29, primals_36, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf39, (4, 128, 8, 8), (8192, 64, 8, 1))
buf40 = buf39; del buf39 # reuse
# Topologically Sorted Source Nodes: [input_37, input_38], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf40, primals_37, 32768, grid=grid(32768), stream=stream0)
del primals_37
# Topologically Sorted Source Nodes: [input_39], Original ATen: [aten.convolution]
buf41 = extern_kernels.convolution(buf40, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf41, (4, 128, 8, 8), (8192, 64, 8, 1))
buf42 = buf41; del buf41 # reuse
# Topologically Sorted Source Nodes: [input_39, input_40], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf42, primals_39, 32768, grid=grid(32768), stream=stream0)
del primals_39
# Topologically Sorted Source Nodes: [input_41], Original ATen: [aten.convolution]
buf43 = extern_kernels.convolution(buf42, primals_40, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf43, (4, 128, 8, 8), (8192, 64, 8, 1))
buf44 = buf43; del buf43 # reuse
# Topologically Sorted Source Nodes: [input_41, input_42], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf44, primals_41, 32768, grid=grid(32768), stream=stream0)
del primals_41
# Topologically Sorted Source Nodes: [input_43], Original ATen: [aten.convolution]
buf45 = extern_kernels.convolution(buf44, primals_42, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf45, (4, 512, 8, 8), (32768, 64, 8, 1))
buf46 = buf45; del buf45 # reuse
# Topologically Sorted Source Nodes: [input_43, input_44], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf46, primals_43, 131072, grid=grid(131072), stream=stream0)
del primals_43
# Topologically Sorted Source Nodes: [input_45], Original ATen: [aten.convolution]
buf47 = extern_kernels.convolution(buf46, primals_44, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf47, (4, 19, 8, 8), (1216, 64, 8, 1))
buf48 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [out2], Original ATen: [aten.cat]
triton_poi_fused_cat_9.run(buf38, primals_35, buf47, primals_45, buf29, buf48, 47360, grid=grid(47360), stream=stream0)
del buf38
del buf47
del primals_35
del primals_45
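        # buf48 ("out2") stacks the 38-channel and 19-channel branch outputs with the
        # shared 128-channel feature map buf29 along the channel dimension (185 channels
        # total); the branch biases (primals_35, primals_45) are folded into the cat
        # kernel rather than into the convolutions. The same pattern repeats below for
        # the later stages (out3, out4, out5, ...).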
# Topologically Sorted Source Nodes: [input_46], Original ATen: [aten.convolution]
buf49 = extern_kernels.convolution(buf48, primals_46, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf49, (4, 128, 8, 8), (8192, 64, 8, 1))
buf50 = buf49; del buf49 # reuse
# Topologically Sorted Source Nodes: [input_46, input_47], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf50, primals_47, 32768, grid=grid(32768), stream=stream0)
del primals_47
# Topologically Sorted Source Nodes: [input_48], Original ATen: [aten.convolution]
buf51 = extern_kernels.convolution(buf50, primals_48, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf51, (4, 128, 8, 8), (8192, 64, 8, 1))
buf52 = buf51; del buf51 # reuse
# Topologically Sorted Source Nodes: [input_48, input_49], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf52, primals_49, 32768, grid=grid(32768), stream=stream0)
del primals_49
# Topologically Sorted Source Nodes: [input_50], Original ATen: [aten.convolution]
buf53 = extern_kernels.convolution(buf52, primals_50, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf53, (4, 128, 8, 8), (8192, 64, 8, 1))
buf54 = buf53; del buf53 # reuse
# Topologically Sorted Source Nodes: [input_50, input_51], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf54, primals_51, 32768, grid=grid(32768), stream=stream0)
del primals_51
# Topologically Sorted Source Nodes: [input_52], Original ATen: [aten.convolution]
buf55 = extern_kernels.convolution(buf54, primals_52, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf55, (4, 128, 8, 8), (8192, 64, 8, 1))
buf56 = buf55; del buf55 # reuse
# Topologically Sorted Source Nodes: [input_52, input_53], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf56, primals_53, 32768, grid=grid(32768), stream=stream0)
del primals_53
# Topologically Sorted Source Nodes: [input_54], Original ATen: [aten.convolution]
buf57 = extern_kernels.convolution(buf56, primals_54, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf57, (4, 128, 8, 8), (8192, 64, 8, 1))
buf58 = buf57; del buf57 # reuse
# Topologically Sorted Source Nodes: [input_54, input_55], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf58, primals_55, 32768, grid=grid(32768), stream=stream0)
del primals_55
# Topologically Sorted Source Nodes: [input_56], Original ATen: [aten.convolution]
buf59 = extern_kernels.convolution(buf58, primals_56, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf59, (4, 128, 8, 8), (8192, 64, 8, 1))
buf60 = buf59; del buf59 # reuse
# Topologically Sorted Source Nodes: [input_56, input_57], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf60, primals_57, 32768, grid=grid(32768), stream=stream0)
del primals_57
# Topologically Sorted Source Nodes: [input_58], Original ATen: [aten.convolution]
buf61 = extern_kernels.convolution(buf60, primals_58, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf61, (4, 38, 8, 8), (2432, 64, 8, 1))
# Topologically Sorted Source Nodes: [input_59], Original ATen: [aten.convolution]
buf62 = extern_kernels.convolution(buf48, primals_60, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf62, (4, 128, 8, 8), (8192, 64, 8, 1))
buf63 = buf62; del buf62 # reuse
# Topologically Sorted Source Nodes: [input_59, input_60], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf63, primals_61, 32768, grid=grid(32768), stream=stream0)
del primals_61
# Topologically Sorted Source Nodes: [input_61], Original ATen: [aten.convolution]
buf64 = extern_kernels.convolution(buf63, primals_62, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf64, (4, 128, 8, 8), (8192, 64, 8, 1))
buf65 = buf64; del buf64 # reuse
# Topologically Sorted Source Nodes: [input_61, input_62], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf65, primals_63, 32768, grid=grid(32768), stream=stream0)
del primals_63
# Topologically Sorted Source Nodes: [input_63], Original ATen: [aten.convolution]
buf66 = extern_kernels.convolution(buf65, primals_64, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf66, (4, 128, 8, 8), (8192, 64, 8, 1))
buf67 = buf66; del buf66 # reuse
# Topologically Sorted Source Nodes: [input_63, input_64], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf67, primals_65, 32768, grid=grid(32768), stream=stream0)
del primals_65
# Topologically Sorted Source Nodes: [input_65], Original ATen: [aten.convolution]
buf68 = extern_kernels.convolution(buf67, primals_66, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf68, (4, 128, 8, 8), (8192, 64, 8, 1))
buf69 = buf68; del buf68 # reuse
# Topologically Sorted Source Nodes: [input_65, input_66], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf69, primals_67, 32768, grid=grid(32768), stream=stream0)
del primals_67
# Topologically Sorted Source Nodes: [input_67], Original ATen: [aten.convolution]
buf70 = extern_kernels.convolution(buf69, primals_68, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf70, (4, 128, 8, 8), (8192, 64, 8, 1))
buf71 = buf70; del buf70 # reuse
# Topologically Sorted Source Nodes: [input_67, input_68], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf71, primals_69, 32768, grid=grid(32768), stream=stream0)
del primals_69
# Topologically Sorted Source Nodes: [input_69], Original ATen: [aten.convolution]
buf72 = extern_kernels.convolution(buf71, primals_70, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf72, (4, 128, 8, 8), (8192, 64, 8, 1))
buf73 = buf72; del buf72 # reuse
# Topologically Sorted Source Nodes: [input_69, input_70], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf73, primals_71, 32768, grid=grid(32768), stream=stream0)
del primals_71
# Topologically Sorted Source Nodes: [input_71], Original ATen: [aten.convolution]
buf74 = extern_kernels.convolution(buf73, primals_72, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf74, (4, 19, 8, 8), (1216, 64, 8, 1))
buf75 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [out3], Original ATen: [aten.cat]
triton_poi_fused_cat_9.run(buf61, primals_59, buf74, primals_73, buf29, buf75, 47360, grid=grid(47360), stream=stream0)
del buf61
del buf74
del primals_59
del primals_73
# Topologically Sorted Source Nodes: [input_72], Original ATen: [aten.convolution]
buf76 = extern_kernels.convolution(buf75, primals_74, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf76, (4, 128, 8, 8), (8192, 64, 8, 1))
buf77 = buf76; del buf76 # reuse
# Topologically Sorted Source Nodes: [input_72, input_73], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf77, primals_75, 32768, grid=grid(32768), stream=stream0)
del primals_75
# Topologically Sorted Source Nodes: [input_74], Original ATen: [aten.convolution]
buf78 = extern_kernels.convolution(buf77, primals_76, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf78, (4, 128, 8, 8), (8192, 64, 8, 1))
buf79 = buf78; del buf78 # reuse
# Topologically Sorted Source Nodes: [input_74, input_75], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf79, primals_77, 32768, grid=grid(32768), stream=stream0)
del primals_77
# Topologically Sorted Source Nodes: [input_76], Original ATen: [aten.convolution]
buf80 = extern_kernels.convolution(buf79, primals_78, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf80, (4, 128, 8, 8), (8192, 64, 8, 1))
buf81 = buf80; del buf80 # reuse
# Topologically Sorted Source Nodes: [input_76, input_77], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf81, primals_79, 32768, grid=grid(32768), stream=stream0)
del primals_79
# Topologically Sorted Source Nodes: [input_78], Original ATen: [aten.convolution]
buf82 = extern_kernels.convolution(buf81, primals_80, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf82, (4, 128, 8, 8), (8192, 64, 8, 1))
buf83 = buf82; del buf82 # reuse
# Topologically Sorted Source Nodes: [input_78, input_79], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf83, primals_81, 32768, grid=grid(32768), stream=stream0)
del primals_81
# Topologically Sorted Source Nodes: [input_80], Original ATen: [aten.convolution]
buf84 = extern_kernels.convolution(buf83, primals_82, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf84, (4, 128, 8, 8), (8192, 64, 8, 1))
buf85 = buf84; del buf84 # reuse
# Topologically Sorted Source Nodes: [input_80, input_81], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf85, primals_83, 32768, grid=grid(32768), stream=stream0)
del primals_83
# Topologically Sorted Source Nodes: [input_82], Original ATen: [aten.convolution]
buf86 = extern_kernels.convolution(buf85, primals_84, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf86, (4, 128, 8, 8), (8192, 64, 8, 1))
buf87 = buf86; del buf86 # reuse
# Topologically Sorted Source Nodes: [input_82, input_83], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf87, primals_85, 32768, grid=grid(32768), stream=stream0)
del primals_85
# Topologically Sorted Source Nodes: [input_84], Original ATen: [aten.convolution]
buf88 = extern_kernels.convolution(buf87, primals_86, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf88, (4, 38, 8, 8), (2432, 64, 8, 1))
# Topologically Sorted Source Nodes: [input_85], Original ATen: [aten.convolution]
buf89 = extern_kernels.convolution(buf75, primals_88, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf89, (4, 128, 8, 8), (8192, 64, 8, 1))
buf90 = buf89; del buf89 # reuse
# Topologically Sorted Source Nodes: [input_85, input_86], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf90, primals_89, 32768, grid=grid(32768), stream=stream0)
del primals_89
# Topologically Sorted Source Nodes: [input_87], Original ATen: [aten.convolution]
buf91 = extern_kernels.convolution(buf90, primals_90, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf91, (4, 128, 8, 8), (8192, 64, 8, 1))
buf92 = buf91; del buf91 # reuse
# Topologically Sorted Source Nodes: [input_87, input_88], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf92, primals_91, 32768, grid=grid(32768), stream=stream0)
del primals_91
# Topologically Sorted Source Nodes: [input_89], Original ATen: [aten.convolution]
buf93 = extern_kernels.convolution(buf92, primals_92, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf93, (4, 128, 8, 8), (8192, 64, 8, 1))
buf94 = buf93; del buf93 # reuse
# Topologically Sorted Source Nodes: [input_89, input_90], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf94, primals_93, 32768, grid=grid(32768), stream=stream0)
del primals_93
# Topologically Sorted Source Nodes: [input_91], Original ATen: [aten.convolution]
buf95 = extern_kernels.convolution(buf94, primals_94, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf95, (4, 128, 8, 8), (8192, 64, 8, 1))
buf96 = buf95; del buf95 # reuse
# Topologically Sorted Source Nodes: [input_91, input_92], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf96, primals_95, 32768, grid=grid(32768), stream=stream0)
del primals_95
# Topologically Sorted Source Nodes: [input_93], Original ATen: [aten.convolution]
buf97 = extern_kernels.convolution(buf96, primals_96, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf97, (4, 128, 8, 8), (8192, 64, 8, 1))
buf98 = buf97; del buf97 # reuse
# Topologically Sorted Source Nodes: [input_93, input_94], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf98, primals_97, 32768, grid=grid(32768), stream=stream0)
del primals_97
# Topologically Sorted Source Nodes: [input_95], Original ATen: [aten.convolution]
buf99 = extern_kernels.convolution(buf98, primals_98, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf99, (4, 128, 8, 8), (8192, 64, 8, 1))
buf100 = buf99; del buf99 # reuse
# Topologically Sorted Source Nodes: [input_95, input_96], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf100, primals_99, 32768, grid=grid(32768), stream=stream0)
del primals_99
# Topologically Sorted Source Nodes: [input_97], Original ATen: [aten.convolution]
buf101 = extern_kernels.convolution(buf100, primals_100, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf101, (4, 19, 8, 8), (1216, 64, 8, 1))
buf102 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [out4], Original ATen: [aten.cat]
triton_poi_fused_cat_9.run(buf88, primals_87, buf101, primals_101, buf29, buf102, 47360, grid=grid(47360), stream=stream0)
del buf101
del buf88
del primals_101
del primals_87
# Topologically Sorted Source Nodes: [input_98], Original ATen: [aten.convolution]
buf103 = extern_kernels.convolution(buf102, primals_102, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf103, (4, 128, 8, 8), (8192, 64, 8, 1))
buf104 = buf103; del buf103 # reuse
# Topologically Sorted Source Nodes: [input_98, input_99], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf104, primals_103, 32768, grid=grid(32768), stream=stream0)
del primals_103
# Topologically Sorted Source Nodes: [input_100], Original ATen: [aten.convolution]
buf105 = extern_kernels.convolution(buf104, primals_104, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf105, (4, 128, 8, 8), (8192, 64, 8, 1))
buf106 = buf105; del buf105 # reuse
# Topologically Sorted Source Nodes: [input_100, input_101], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf106, primals_105, 32768, grid=grid(32768), stream=stream0)
del primals_105
# Topologically Sorted Source Nodes: [input_102], Original ATen: [aten.convolution]
buf107 = extern_kernels.convolution(buf106, primals_106, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf107, (4, 128, 8, 8), (8192, 64, 8, 1))
buf108 = buf107; del buf107 # reuse
# Topologically Sorted Source Nodes: [input_102, input_103], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf108, primals_107, 32768, grid=grid(32768), stream=stream0)
del primals_107
# Topologically Sorted Source Nodes: [input_104], Original ATen: [aten.convolution]
buf109 = extern_kernels.convolution(buf108, primals_108, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf109, (4, 128, 8, 8), (8192, 64, 8, 1))
buf110 = buf109; del buf109 # reuse
# Topologically Sorted Source Nodes: [input_104, input_105], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf110, primals_109, 32768, grid=grid(32768), stream=stream0)
del primals_109
# Topologically Sorted Source Nodes: [input_106], Original ATen: [aten.convolution]
buf111 = extern_kernels.convolution(buf110, primals_110, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf111, (4, 128, 8, 8), (8192, 64, 8, 1))
buf112 = buf111; del buf111 # reuse
# Topologically Sorted Source Nodes: [input_106, input_107], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf112, primals_111, 32768, grid=grid(32768), stream=stream0)
del primals_111
# Topologically Sorted Source Nodes: [input_108], Original ATen: [aten.convolution]
buf113 = extern_kernels.convolution(buf112, primals_112, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf113, (4, 128, 8, 8), (8192, 64, 8, 1))
buf114 = buf113; del buf113 # reuse
# Topologically Sorted Source Nodes: [input_108, input_109], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf114, primals_113, 32768, grid=grid(32768), stream=stream0)
del primals_113
# Topologically Sorted Source Nodes: [input_110], Original ATen: [aten.convolution]
buf115 = extern_kernels.convolution(buf114, primals_114, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf115, (4, 38, 8, 8), (2432, 64, 8, 1))
# Topologically Sorted Source Nodes: [input_111], Original ATen: [aten.convolution]
buf116 = extern_kernels.convolution(buf102, primals_116, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf116, (4, 128, 8, 8), (8192, 64, 8, 1))
buf117 = buf116; del buf116 # reuse
# Topologically Sorted Source Nodes: [input_111, input_112], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf117, primals_117, 32768, grid=grid(32768), stream=stream0)
del primals_117
# Topologically Sorted Source Nodes: [input_113], Original ATen: [aten.convolution]
buf118 = extern_kernels.convolution(buf117, primals_118, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf118, (4, 128, 8, 8), (8192, 64, 8, 1))
buf119 = buf118; del buf118 # reuse
# Topologically Sorted Source Nodes: [input_113, input_114], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf119, primals_119, 32768, grid=grid(32768), stream=stream0)
del primals_119
# Topologically Sorted Source Nodes: [input_115], Original ATen: [aten.convolution]
buf120 = extern_kernels.convolution(buf119, primals_120, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf120, (4, 128, 8, 8), (8192, 64, 8, 1))
buf121 = buf120; del buf120 # reuse
# Topologically Sorted Source Nodes: [input_115, input_116], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf121, primals_121, 32768, grid=grid(32768), stream=stream0)
del primals_121
# Topologically Sorted Source Nodes: [input_117], Original ATen: [aten.convolution]
buf122 = extern_kernels.convolution(buf121, primals_122, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf122, (4, 128, 8, 8), (8192, 64, 8, 1))
buf123 = buf122; del buf122 # reuse
# Topologically Sorted Source Nodes: [input_117, input_118], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf123, primals_123, 32768, grid=grid(32768), stream=stream0)
del primals_123
# Topologically Sorted Source Nodes: [input_119], Original ATen: [aten.convolution]
buf124 = extern_kernels.convolution(buf123, primals_124, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf124, (4, 128, 8, 8), (8192, 64, 8, 1))
buf125 = buf124; del buf124 # reuse
# Topologically Sorted Source Nodes: [input_119, input_120], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf125, primals_125, 32768, grid=grid(32768), stream=stream0)
del primals_125
# Topologically Sorted Source Nodes: [input_121], Original ATen: [aten.convolution]
buf126 = extern_kernels.convolution(buf125, primals_126, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf126, (4, 128, 8, 8), (8192, 64, 8, 1))
buf127 = buf126; del buf126 # reuse
# Topologically Sorted Source Nodes: [input_121, input_122], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf127, primals_127, 32768, grid=grid(32768), stream=stream0)
del primals_127
# Topologically Sorted Source Nodes: [input_123], Original ATen: [aten.convolution]
buf128 = extern_kernels.convolution(buf127, primals_128, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf128, (4, 19, 8, 8), (1216, 64, 8, 1))
buf129 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [out5], Original ATen: [aten.cat]
triton_poi_fused_cat_9.run(buf115, primals_115, buf128, primals_129, buf29, buf129, 47360, grid=grid(47360), stream=stream0)
del buf115
del buf128
del primals_115
del primals_129
# Topologically Sorted Source Nodes: [input_124], Original ATen: [aten.convolution]
buf130 = extern_kernels.convolution(buf129, primals_130, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf130, (4, 128, 8, 8), (8192, 64, 8, 1))
buf131 = buf130; del buf130 # reuse
# Topologically Sorted Source Nodes: [input_124, input_125], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf131, primals_131, 32768, grid=grid(32768), stream=stream0)
del primals_131
# Topologically Sorted Source Nodes: [input_126], Original ATen: [aten.convolution]
buf132 = extern_kernels.convolution(buf131, primals_132, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf132, (4, 128, 8, 8), (8192, 64, 8, 1))
buf133 = buf132; del buf132 # reuse
# Topologically Sorted Source Nodes: [input_126, input_127], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf133, primals_133, 32768, grid=grid(32768), stream=stream0)
del primals_133
# Topologically Sorted Source Nodes: [input_128], Original ATen: [aten.convolution]
buf134 = extern_kernels.convolution(buf133, primals_134, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf134, (4, 128, 8, 8), (8192, 64, 8, 1))
buf135 = buf134; del buf134 # reuse
# Topologically Sorted Source Nodes: [input_128, input_129], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf135, primals_135, 32768, grid=grid(32768), stream=stream0)
del primals_135
# Topologically Sorted Source Nodes: [input_130], Original ATen: [aten.convolution]
buf136 = extern_kernels.convolution(buf135, primals_136, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf136, (4, 128, 8, 8), (8192, 64, 8, 1))
buf137 = buf136; del buf136 # reuse
# Topologically Sorted Source Nodes: [input_130, input_131], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf137, primals_137, 32768, grid=grid(32768), stream=stream0)
del primals_137
# Topologically Sorted Source Nodes: [input_132], Original ATen: [aten.convolution]
buf138 = extern_kernels.convolution(buf137, primals_138, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf138, (4, 128, 8, 8), (8192, 64, 8, 1))
buf139 = buf138; del buf138 # reuse
# Topologically Sorted Source Nodes: [input_132, input_133], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf139, primals_139, 32768, grid=grid(32768), stream=stream0)
del primals_139
# Topologically Sorted Source Nodes: [input_134], Original ATen: [aten.convolution]
buf140 = extern_kernels.convolution(buf139, primals_140, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf140, (4, 128, 8, 8), (8192, 64, 8, 1))
buf141 = buf140; del buf140 # reuse
# Topologically Sorted Source Nodes: [input_134, input_135], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf141, primals_141, 32768, grid=grid(32768), stream=stream0)
del primals_141
# Topologically Sorted Source Nodes: [input_136], Original ATen: [aten.convolution]
buf142 = extern_kernels.convolution(buf141, primals_142, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf142, (4, 38, 8, 8), (2432, 64, 8, 1))
# Topologically Sorted Source Nodes: [input_137], Original ATen: [aten.convolution]
buf143 = extern_kernels.convolution(buf129, primals_144, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf143, (4, 128, 8, 8), (8192, 64, 8, 1))
buf144 = buf143; del buf143 # reuse
# Topologically Sorted Source Nodes: [input_137, input_138], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf144, primals_145, 32768, grid=grid(32768), stream=stream0)
del primals_145
# Topologically Sorted Source Nodes: [input_139], Original ATen: [aten.convolution]
buf145 = extern_kernels.convolution(buf144, primals_146, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf145, (4, 128, 8, 8), (8192, 64, 8, 1))
buf146 = buf145; del buf145 # reuse
# Topologically Sorted Source Nodes: [input_139, input_140], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf146, primals_147, 32768, grid=grid(32768), stream=stream0)
del primals_147
# Topologically Sorted Source Nodes: [input_141], Original ATen: [aten.convolution]
buf147 = extern_kernels.convolution(buf146, primals_148, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf147, (4, 128, 8, 8), (8192, 64, 8, 1))
buf148 = buf147; del buf147 # reuse
# Topologically Sorted Source Nodes: [input_141, input_142], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf148, primals_149, 32768, grid=grid(32768), stream=stream0)
del primals_149
# Topologically Sorted Source Nodes: [input_143], Original ATen: [aten.convolution]
buf149 = extern_kernels.convolution(buf148, primals_150, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf149, (4, 128, 8, 8), (8192, 64, 8, 1))
buf150 = buf149; del buf149 # reuse
# Topologically Sorted Source Nodes: [input_143, input_144], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf150, primals_151, 32768, grid=grid(32768), stream=stream0)
del primals_151
# Topologically Sorted Source Nodes: [input_145], Original ATen: [aten.convolution]
buf151 = extern_kernels.convolution(buf150, primals_152, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf151, (4, 128, 8, 8), (8192, 64, 8, 1))
buf152 = buf151; del buf151 # reuse
# Topologically Sorted Source Nodes: [input_145, input_146], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf152, primals_153, 32768, grid=grid(32768), stream=stream0)
del primals_153
# Topologically Sorted Source Nodes: [input_147], Original ATen: [aten.convolution]
buf153 = extern_kernels.convolution(buf152, primals_154, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf153, (4, 128, 8, 8), (8192, 64, 8, 1))
buf154 = buf153; del buf153 # reuse
# Topologically Sorted Source Nodes: [input_147, input_148], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf154, primals_155, 32768, grid=grid(32768), stream=stream0)
del primals_155
# Topologically Sorted Source Nodes: [input_149], Original ATen: [aten.convolution]
buf155 = extern_kernels.convolution(buf154, primals_156, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf155, (4, 19, 8, 8), (1216, 64, 8, 1))
buf156 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [out6], Original ATen: [aten.cat]
triton_poi_fused_cat_9.run(buf142, primals_143, buf155, primals_157, buf29, buf156, 47360, grid=grid(47360), stream=stream0)
del buf142
del buf155
del primals_143
del primals_157
# Topologically Sorted Source Nodes: [input_150], Original ATen: [aten.convolution]
buf157 = extern_kernels.convolution(buf156, primals_158, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf157, (4, 128, 8, 8), (8192, 64, 8, 1))
buf158 = buf157; del buf157 # reuse
# Topologically Sorted Source Nodes: [input_150, input_151], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf158, primals_159, 32768, grid=grid(32768), stream=stream0)
del primals_159
# Topologically Sorted Source Nodes: [input_152], Original ATen: [aten.convolution]
buf159 = extern_kernels.convolution(buf158, primals_160, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf159, (4, 128, 8, 8), (8192, 64, 8, 1))
buf160 = buf159; del buf159 # reuse
# Topologically Sorted Source Nodes: [input_152, input_153], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf160, primals_161, 32768, grid=grid(32768), stream=stream0)
del primals_161
# Topologically Sorted Source Nodes: [input_154], Original ATen: [aten.convolution]
buf161 = extern_kernels.convolution(buf160, primals_162, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf161, (4, 128, 8, 8), (8192, 64, 8, 1))
buf162 = buf161; del buf161 # reuse
# Topologically Sorted Source Nodes: [input_154, input_155], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf162, primals_163, 32768, grid=grid(32768), stream=stream0)
del primals_163
# Topologically Sorted Source Nodes: [input_156], Original ATen: [aten.convolution]
buf163 = extern_kernels.convolution(buf162, primals_164, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf163, (4, 128, 8, 8), (8192, 64, 8, 1))
buf164 = buf163; del buf163 # reuse
# Topologically Sorted Source Nodes: [input_156, input_157], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf164, primals_165, 32768, grid=grid(32768), stream=stream0)
del primals_165
# Topologically Sorted Source Nodes: [input_158], Original ATen: [aten.convolution]
buf165 = extern_kernels.convolution(buf164, primals_166, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf165, (4, 128, 8, 8), (8192, 64, 8, 1))
buf166 = buf165; del buf165 # reuse
# Topologically Sorted Source Nodes: [input_158, input_159], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf166, primals_167, 32768, grid=grid(32768), stream=stream0)
del primals_167
# Topologically Sorted Source Nodes: [input_160], Original ATen: [aten.convolution]
buf167 = extern_kernels.convolution(buf166, primals_168, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf167, (4, 128, 8, 8), (8192, 64, 8, 1))
buf168 = buf167; del buf167 # reuse
# Topologically Sorted Source Nodes: [input_160, input_161], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf168, primals_169, 32768, grid=grid(32768), stream=stream0)
del primals_169
# Topologically Sorted Source Nodes: [input_162], Original ATen: [aten.convolution]
buf169 = extern_kernels.convolution(buf168, primals_170, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf169, (4, 38, 8, 8), (2432, 64, 8, 1))
buf170 = buf169; del buf169 # reuse
# Topologically Sorted Source Nodes: [input_162], Original ATen: [aten.convolution]
triton_poi_fused_convolution_10.run(buf170, primals_171, 9728, grid=grid(9728), stream=stream0)
del primals_171
# Topologically Sorted Source Nodes: [input_163], Original ATen: [aten.convolution]
buf171 = extern_kernels.convolution(buf156, primals_172, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf171, (4, 128, 8, 8), (8192, 64, 8, 1))
buf172 = buf171; del buf171 # reuse
# Topologically Sorted Source Nodes: [input_163, input_164], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf172, primals_173, 32768, grid=grid(32768), stream=stream0)
del primals_173
# Topologically Sorted Source Nodes: [input_165], Original ATen: [aten.convolution]
buf173 = extern_kernels.convolution(buf172, primals_174, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf173, (4, 128, 8, 8), (8192, 64, 8, 1))
buf174 = buf173; del buf173 # reuse
# Topologically Sorted Source Nodes: [input_165, input_166], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf174, primals_175, 32768, grid=grid(32768), stream=stream0)
del primals_175
# Topologically Sorted Source Nodes: [input_167], Original ATen: [aten.convolution]
buf175 = extern_kernels.convolution(buf174, primals_176, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf175, (4, 128, 8, 8), (8192, 64, 8, 1))
buf176 = buf175; del buf175 # reuse
# Topologically Sorted Source Nodes: [input_167, input_168], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf176, primals_177, 32768, grid=grid(32768), stream=stream0)
del primals_177
# Topologically Sorted Source Nodes: [input_169], Original ATen: [aten.convolution]
buf177 = extern_kernels.convolution(buf176, primals_178, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf177, (4, 128, 8, 8), (8192, 64, 8, 1))
buf178 = buf177; del buf177 # reuse
# Topologically Sorted Source Nodes: [input_169, input_170], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf178, primals_179, 32768, grid=grid(32768), stream=stream0)
del primals_179
# Topologically Sorted Source Nodes: [input_171], Original ATen: [aten.convolution]
buf179 = extern_kernels.convolution(buf178, primals_180, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf179, (4, 128, 8, 8), (8192, 64, 8, 1))
buf180 = buf179; del buf179 # reuse
# Topologically Sorted Source Nodes: [input_171, input_172], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf180, primals_181, 32768, grid=grid(32768), stream=stream0)
del primals_181
# Topologically Sorted Source Nodes: [input_173], Original ATen: [aten.convolution]
buf181 = extern_kernels.convolution(buf180, primals_182, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf181, (4, 128, 8, 8), (8192, 64, 8, 1))
buf182 = buf181; del buf181 # reuse
# Topologically Sorted Source Nodes: [input_173, input_174], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf182, primals_183, 32768, grid=grid(32768), stream=stream0)
del primals_183
# Topologically Sorted Source Nodes: [input_175], Original ATen: [aten.convolution]
buf183 = extern_kernels.convolution(buf182, primals_184, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf183, (4, 19, 8, 8), (1216, 64, 8, 1))
buf184 = buf183; del buf183 # reuse
buf185 = empty_strided_cuda((4, 19, 8, 8), (1280, 64, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [input_175, input_176], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_11.run(buf184, primals_185, buf185, 4864, grid=grid(4864), stream=stream0)
del primals_185
return (buf170, buf184, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, primals_36, primals_38, primals_40, primals_42, primals_44, primals_46, primals_48, primals_50, primals_52, primals_54, primals_56, primals_58, primals_60, primals_62, primals_64, primals_66, primals_68, primals_70, primals_72, primals_74, primals_76, primals_78, primals_80, primals_82, primals_84, primals_86, primals_88, primals_90, primals_92, primals_94, primals_96, primals_98, primals_100, primals_102, primals_104, primals_106, primals_108, primals_110, primals_112, primals_114, primals_116, primals_118, primals_120, primals_122, primals_124, primals_126, primals_128, primals_130, primals_132, primals_134, primals_136, primals_138, primals_140, primals_142, primals_144, primals_146, primals_148, primals_150, primals_152, primals_154, primals_156, primals_158, primals_160, primals_162, primals_164, primals_166, primals_168, primals_170, primals_172, primals_174, primals_176, primals_178, primals_180, primals_182, primals_184, buf1, buf3, buf4, buf5, buf7, buf9, buf10, buf11, buf13, buf15, buf17, buf19, buf20, buf21, buf23, buf25, buf27, buf29, buf31, buf33, buf35, buf37, buf40, buf42, buf44, buf46, buf48, buf50, buf52, buf54, buf56, buf58, buf60, buf63, buf65, buf67, buf69, buf71, buf73, buf75, buf77, buf79, buf81, buf83, buf85, buf87, buf90, buf92, buf94, buf96, buf98, buf100, buf102, buf104, buf106, buf108, buf110, buf112, buf114, buf117, buf119, buf121, buf123, buf125, buf127, buf129, buf131, buf133, buf135, buf137, buf139, buf141, buf144, buf146, buf148, buf150, buf152, buf154, buf156, buf158, buf160, buf162, buf164, buf166, buf168, buf172, buf174, buf176, buf178, buf180, buf182, buf185, )
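# Benchmark helper generated by TorchInductor: builds random tensors with the
# expected shapes/strides for all 185 inputs and times repeated runs of call().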
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((256, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_31 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_32 = rand_strided((512, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_33 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_34 = rand_strided((38, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_35 = rand_strided((38, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_36 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_37 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_38 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_39 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_40 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_41 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_42 = rand_strided((512, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_43 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_44 = rand_strided((19, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_45 = rand_strided((19, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_46 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_47 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_48 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_49 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_50 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_51 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_52 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_53 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_54 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_55 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_56 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_57 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_58 = rand_strided((38, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_59 = rand_strided((38, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_60 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_61 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_62 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_63 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_64 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_65 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_66 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_67 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_68 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_69 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_70 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_71 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_72 = rand_strided((19, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_73 = rand_strided((19, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_74 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_75 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_76 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_77 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_78 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_79 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_80 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_81 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_82 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_83 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_84 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_85 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_86 = rand_strided((38, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_87 = rand_strided((38, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_88 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_89 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_90 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_91 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_92 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_93 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_94 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_95 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_96 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_97 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_98 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_99 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_100 = rand_strided((19, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_101 = rand_strided((19, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_102 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_103 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_104 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_105 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_106 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_107 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_108 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_109 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_110 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_111 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_112 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_113 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_114 = rand_strided((38, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_115 = rand_strided((38, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_116 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_117 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_118 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_119 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_120 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_121 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_122 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_123 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_124 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_125 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_126 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_127 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_128 = rand_strided((19, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_129 = rand_strided((19, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_130 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_131 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_132 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_133 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_134 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_135 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_136 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_137 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_138 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_139 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_140 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_141 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_142 = rand_strided((38, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_143 = rand_strided((38, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_144 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_145 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_146 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_147 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_148 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_149 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_150 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_151 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_152 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_153 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_154 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_155 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_156 = rand_strided((19, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_157 = rand_strided((19, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_158 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_159 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_160 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_161 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_162 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_163 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_164 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_165 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_166 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_167 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_168 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_169 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_170 = rand_strided((38, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_171 = rand_strided((38, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_172 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_173 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_174 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_175 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_176 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_177 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_178 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_179 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_180 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_181 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_182 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_183 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_184 = rand_strided((19, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_185 = rand_strided((19, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121, primals_122, primals_123, primals_124, primals_125, primals_126, primals_127, primals_128, primals_129, primals_130, primals_131, primals_132, primals_133, primals_134, primals_135, primals_136, primals_137, primals_138, primals_139, primals_140, primals_141, primals_142, primals_143, primals_144, primals_145, primals_146, primals_147, primals_148, primals_149, primals_150, primals_151, primals_152, primals_153, primals_154, primals_155, primals_156, primals_157, primals_158, primals_159, primals_160, primals_161, primals_162, primals_163, primals_164, primals_165, primals_166, primals_167, primals_168, primals_169, primals_170, primals_171, primals_172, primals_173, primals_174, primals_175, primals_176, primals_177, primals_178, primals_179, primals_180, primals_181, primals_182, primals_183, primals_184, primals_185])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
# ============================================================================
# Second, self-contained listing: Triton kernels and call() entry point.
# ============================================================================
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from collections import OrderedDict
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
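# In-place fused bias-add + ReLU for a convolution output with 64 channels and
# 64x64 spatial extent (4096 elements per channel); the bias is broadcast per channel.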
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
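# 2x2 / stride-2 max pooling on 64x64 feature maps (output width 32); stores the
# pooled value to out_ptr0 and the int8 index (0-3) of the winning element to out_ptr1.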
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
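# In-place fused bias-add + ReLU for 128 channels at 32x32 (1024 elements per channel).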
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
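# 2x2 / stride-2 max pooling on 32x32 feature maps (output width 16), with int8 argmax indices.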
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
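# In-place fused bias-add + ReLU for 256 channels at 16x16 (256 elements per channel).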
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
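# 2x2 / stride-2 max pooling on 16x16 feature maps (output width 8), with int8 argmax indices.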
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
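# In-place fused bias-add + ReLU for 512 channels at 8x8 (64 elements per channel).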
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 512
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
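# In-place fused bias-add + ReLU for 256 channels at 8x8.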
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
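# In-place fused bias-add + ReLU for 128 channels at 8x8; this is the kernel reused
# throughout the refinement stages above.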
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
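# Channel concatenation producing the 185-channel, 8x8 stage input: channels 0-37 come
# from in_ptr0 plus bias in_ptr1, channels 38-56 from in_ptr2 plus bias in_ptr3, and the
# remaining 128 channels are copied unchanged from in_ptr4 (the shared backbone features).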
@triton.jit
def triton_poi_fused_cat_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 47360
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 64 % 185
x0 = xindex % 64
x2 = xindex // 11840
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 38, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x1 + 2432 * x2), tmp4 & xmask,
other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 57, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tl.load(in_ptr2 + (x0 + 64 * (-38 + x1) + 1216 * x2), tmp13 &
xmask, other=0.0)
tmp15 = tl.load(in_ptr3 + (-38 + x1), tmp13 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp13, tmp16, tmp17)
tmp19 = tmp0 >= tmp11
tl.full([1], 185, tl.int64)
tmp22 = tl.load(in_ptr4 + (x0 + 64 * (-57 + x1) + 8192 * x2), tmp19 &
xmask, other=0.0)
tmp23 = tl.where(tmp13, tmp18, tmp22)
tmp24 = tl.where(tmp4, tmp9, tmp23)
tl.store(out_ptr0 + x3, tmp24, xmask)
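# Plain in-place bias-add (no activation) for the final 38-channel, 8x8 output head.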
@triton.jit
def triton_poi_fused_convolution_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 9728
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 64 % 38
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
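# In-place bias-add + ReLU for the final 19-channel, 8x8 output head; also stores the
# boolean (output <= 0) mask into a separately strided buffer for ReLU's threshold_backward.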
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_11(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4864
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = xindex // 64 % 19
x2 = xindex // 1216
x3 = xindex % 1216
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x3 + 1280 * x2), tmp6, xmask)
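# Generated entry point: unpacks the 185 parameter/input tensors, asserts their shapes
# and strides, then executes the convolution calls and fused Triton kernels on CUDA device 0.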
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41, primals_42,
primals_43, primals_44, primals_45, primals_46, primals_47,
primals_48, primals_49, primals_50, primals_51, primals_52,
primals_53, primals_54, primals_55, primals_56, primals_57,
primals_58, primals_59, primals_60, primals_61, primals_62,
primals_63, primals_64, primals_65, primals_66, primals_67,
primals_68, primals_69, primals_70, primals_71, primals_72,
primals_73, primals_74, primals_75, primals_76, primals_77,
primals_78, primals_79, primals_80, primals_81, primals_82,
primals_83, primals_84, primals_85, primals_86, primals_87,
primals_88, primals_89, primals_90, primals_91, primals_92,
primals_93, primals_94, primals_95, primals_96, primals_97,
primals_98, primals_99, primals_100, primals_101, primals_102,
primals_103, primals_104, primals_105, primals_106, primals_107,
primals_108, primals_109, primals_110, primals_111, primals_112,
primals_113, primals_114, primals_115, primals_116, primals_117,
primals_118, primals_119, primals_120, primals_121, primals_122,
primals_123, primals_124, primals_125, primals_126, primals_127,
primals_128, primals_129, primals_130, primals_131, primals_132,
primals_133, primals_134, primals_135, primals_136, primals_137,
primals_138, primals_139, primals_140, primals_141, primals_142,
primals_143, primals_144, primals_145, primals_146, primals_147,
primals_148, primals_149, primals_150, primals_151, primals_152,
primals_153, primals_154, primals_155, primals_156, primals_157,
primals_158, primals_159, primals_160, primals_161, primals_162,
primals_163, primals_164, primals_165, primals_166, primals_167,
primals_168, primals_169, primals_170, primals_171, primals_172,
primals_173, primals_174, primals_175, primals_176, primals_177,
primals_178, primals_179, primals_180, primals_181, primals_182,
primals_183, primals_184, primals_185) = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_11, (256,), (1,))
assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_13, (256,), (1,))
assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_15, (256,), (1,))
assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_17, (256,), (1,))
assert_size_stride(primals_18, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_19, (512,), (1,))
assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_21, (512,), (1,))
assert_size_stride(primals_22, (256, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_23, (256,), (1,))
assert_size_stride(primals_24, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_25, (128,), (1,))
assert_size_stride(primals_26, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_27, (128,), (1,))
assert_size_stride(primals_28, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_29, (128,), (1,))
assert_size_stride(primals_30, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_31, (128,), (1,))
assert_size_stride(primals_32, (512, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_33, (512,), (1,))
assert_size_stride(primals_34, (38, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_35, (38,), (1,))
assert_size_stride(primals_36, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_37, (128,), (1,))
assert_size_stride(primals_38, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_39, (128,), (1,))
assert_size_stride(primals_40, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_41, (128,), (1,))
assert_size_stride(primals_42, (512, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_43, (512,), (1,))
assert_size_stride(primals_44, (19, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_45, (19,), (1,))
assert_size_stride(primals_46, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_47, (128,), (1,))
assert_size_stride(primals_48, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_49, (128,), (1,))
assert_size_stride(primals_50, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_51, (128,), (1,))
assert_size_stride(primals_52, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_53, (128,), (1,))
assert_size_stride(primals_54, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_55, (128,), (1,))
assert_size_stride(primals_56, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_57, (128,), (1,))
assert_size_stride(primals_58, (38, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_59, (38,), (1,))
assert_size_stride(primals_60, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_61, (128,), (1,))
assert_size_stride(primals_62, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_63, (128,), (1,))
assert_size_stride(primals_64, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_65, (128,), (1,))
assert_size_stride(primals_66, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_67, (128,), (1,))
assert_size_stride(primals_68, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_69, (128,), (1,))
assert_size_stride(primals_70, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_71, (128,), (1,))
assert_size_stride(primals_72, (19, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_73, (19,), (1,))
assert_size_stride(primals_74, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_75, (128,), (1,))
assert_size_stride(primals_76, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_77, (128,), (1,))
assert_size_stride(primals_78, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_79, (128,), (1,))
assert_size_stride(primals_80, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_81, (128,), (1,))
assert_size_stride(primals_82, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_83, (128,), (1,))
assert_size_stride(primals_84, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_85, (128,), (1,))
assert_size_stride(primals_86, (38, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_87, (38,), (1,))
assert_size_stride(primals_88, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_89, (128,), (1,))
assert_size_stride(primals_90, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_91, (128,), (1,))
assert_size_stride(primals_92, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_93, (128,), (1,))
assert_size_stride(primals_94, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_95, (128,), (1,))
assert_size_stride(primals_96, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_97, (128,), (1,))
assert_size_stride(primals_98, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_99, (128,), (1,))
assert_size_stride(primals_100, (19, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_101, (19,), (1,))
assert_size_stride(primals_102, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_103, (128,), (1,))
assert_size_stride(primals_104, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_105, (128,), (1,))
assert_size_stride(primals_106, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_107, (128,), (1,))
assert_size_stride(primals_108, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_109, (128,), (1,))
assert_size_stride(primals_110, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_111, (128,), (1,))
assert_size_stride(primals_112, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_113, (128,), (1,))
assert_size_stride(primals_114, (38, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_115, (38,), (1,))
assert_size_stride(primals_116, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_117, (128,), (1,))
assert_size_stride(primals_118, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_119, (128,), (1,))
assert_size_stride(primals_120, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_121, (128,), (1,))
assert_size_stride(primals_122, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_123, (128,), (1,))
assert_size_stride(primals_124, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_125, (128,), (1,))
assert_size_stride(primals_126, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_127, (128,), (1,))
assert_size_stride(primals_128, (19, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_129, (19,), (1,))
assert_size_stride(primals_130, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_131, (128,), (1,))
assert_size_stride(primals_132, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_133, (128,), (1,))
assert_size_stride(primals_134, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_135, (128,), (1,))
assert_size_stride(primals_136, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_137, (128,), (1,))
assert_size_stride(primals_138, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_139, (128,), (1,))
assert_size_stride(primals_140, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_141, (128,), (1,))
assert_size_stride(primals_142, (38, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_143, (38,), (1,))
assert_size_stride(primals_144, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_145, (128,), (1,))
assert_size_stride(primals_146, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_147, (128,), (1,))
assert_size_stride(primals_148, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_149, (128,), (1,))
assert_size_stride(primals_150, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_151, (128,), (1,))
assert_size_stride(primals_152, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_153, (128,), (1,))
assert_size_stride(primals_154, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_155, (128,), (1,))
assert_size_stride(primals_156, (19, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_157, (19,), (1,))
assert_size_stride(primals_158, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_159, (128,), (1,))
assert_size_stride(primals_160, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_161, (128,), (1,))
assert_size_stride(primals_162, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_163, (128,), (1,))
assert_size_stride(primals_164, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_165, (128,), (1,))
assert_size_stride(primals_166, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_167, (128,), (1,))
assert_size_stride(primals_168, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_169, (128,), (1,))
assert_size_stride(primals_170, (38, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_171, (38,), (1,))
assert_size_stride(primals_172, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_173, (128,), (1,))
assert_size_stride(primals_174, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_175, (128,), (1,))
assert_size_stride(primals_176, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_177, (128,), (1,))
assert_size_stride(primals_178, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_179, (128,), (1,))
assert_size_stride(primals_180, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_181, (128,), (1,))
assert_size_stride(primals_182, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_183, (128,), (1,))
assert_size_stride(primals_184, (19, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_185, (19,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(1048576)](buf1, primals_2,
1048576, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_0[grid(1048576)](buf3, primals_5,
1048576, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.float32)
buf5 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(262144)](buf3, buf4,
buf5, 262144, XBLOCK=512, num_warps=8, num_stages=1)
buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 128, 32, 32), (131072, 1024, 32, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_2[grid(524288)](buf7, primals_7,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 128, 32, 32), (131072, 1024, 32, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_2[grid(524288)](buf9, primals_9,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
buf10 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.float32)
buf11 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(131072)](buf9,
buf10, buf11, 131072, XBLOCK=512, num_warps=8, num_stages=1)
buf12 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 256, 16, 16), (65536, 256, 16, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_relu_4[grid(262144)](buf13, primals_11,
262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_11
buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 256, 16, 16), (65536, 256, 16, 1))
buf15 = buf14
del buf14
triton_poi_fused_convolution_relu_4[grid(262144)](buf15, primals_13,
262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_13
buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 256, 16, 16), (65536, 256, 16, 1))
buf17 = buf16
del buf16
triton_poi_fused_convolution_relu_4[grid(262144)](buf17, primals_15,
262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_15
buf18 = extern_kernels.convolution(buf17, primals_16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 256, 16, 16), (65536, 256, 16, 1))
buf19 = buf18
del buf18
triton_poi_fused_convolution_relu_4[grid(262144)](buf19, primals_17,
262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_17
buf20 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.float32)
buf21 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_5[grid(65536)](buf19,
buf20, buf21, 65536, XBLOCK=256, num_warps=4, num_stages=1)
buf22 = extern_kernels.convolution(buf20, primals_18, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 512, 8, 8), (32768, 64, 8, 1))
buf23 = buf22
del buf22
triton_poi_fused_convolution_relu_6[grid(131072)](buf23, primals_19,
131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_19
buf24 = extern_kernels.convolution(buf23, primals_20, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 512, 8, 8), (32768, 64, 8, 1))
buf25 = buf24
del buf24
triton_poi_fused_convolution_relu_6[grid(131072)](buf25, primals_21,
131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_21
buf26 = extern_kernels.convolution(buf25, primals_22, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 256, 8, 8), (16384, 64, 8, 1))
buf27 = buf26
del buf26
triton_poi_fused_convolution_relu_7[grid(65536)](buf27, primals_23,
65536, XBLOCK=256, num_warps=4, num_stages=1)
del primals_23
buf28 = extern_kernels.convolution(buf27, primals_24, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 128, 8, 8), (8192, 64, 8, 1))
buf29 = buf28
del buf28
triton_poi_fused_convolution_relu_8[grid(32768)](buf29, primals_25,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_25
buf30 = extern_kernels.convolution(buf29, primals_26, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 128, 8, 8), (8192, 64, 8, 1))
buf31 = buf30
del buf30
triton_poi_fused_convolution_relu_8[grid(32768)](buf31, primals_27,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_27
buf32 = extern_kernels.convolution(buf31, primals_28, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 128, 8, 8), (8192, 64, 8, 1))
buf33 = buf32
del buf32
triton_poi_fused_convolution_relu_8[grid(32768)](buf33, primals_29,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_29
buf34 = extern_kernels.convolution(buf33, primals_30, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf34, (4, 128, 8, 8), (8192, 64, 8, 1))
buf35 = buf34
del buf34
triton_poi_fused_convolution_relu_8[grid(32768)](buf35, primals_31,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_31
buf36 = extern_kernels.convolution(buf35, primals_32, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 512, 8, 8), (32768, 64, 8, 1))
buf37 = buf36
del buf36
triton_poi_fused_convolution_relu_6[grid(131072)](buf37, primals_33,
131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_33
buf38 = extern_kernels.convolution(buf37, primals_34, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 38, 8, 8), (2432, 64, 8, 1))
buf39 = extern_kernels.convolution(buf29, primals_36, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf39, (4, 128, 8, 8), (8192, 64, 8, 1))
buf40 = buf39
del buf39
triton_poi_fused_convolution_relu_8[grid(32768)](buf40, primals_37,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_37
buf41 = extern_kernels.convolution(buf40, primals_38, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf41, (4, 128, 8, 8), (8192, 64, 8, 1))
buf42 = buf41
del buf41
triton_poi_fused_convolution_relu_8[grid(32768)](buf42, primals_39,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_39
buf43 = extern_kernels.convolution(buf42, primals_40, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf43, (4, 128, 8, 8), (8192, 64, 8, 1))
buf44 = buf43
del buf43
triton_poi_fused_convolution_relu_8[grid(32768)](buf44, primals_41,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_41
buf45 = extern_kernels.convolution(buf44, primals_42, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf45, (4, 512, 8, 8), (32768, 64, 8, 1))
buf46 = buf45
del buf45
triton_poi_fused_convolution_relu_6[grid(131072)](buf46, primals_43,
131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_43
buf47 = extern_kernels.convolution(buf46, primals_44, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf47, (4, 19, 8, 8), (1216, 64, 8, 1))
buf48 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1), torch
.float32)
triton_poi_fused_cat_9[grid(47360)](buf38, primals_35, buf47,
primals_45, buf29, buf48, 47360, XBLOCK=512, num_warps=4,
num_stages=1)
del buf38
del buf47
del primals_35
del primals_45
buf49 = extern_kernels.convolution(buf48, primals_46, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf49, (4, 128, 8, 8), (8192, 64, 8, 1))
buf50 = buf49
del buf49
triton_poi_fused_convolution_relu_8[grid(32768)](buf50, primals_47,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_47
buf51 = extern_kernels.convolution(buf50, primals_48, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf51, (4, 128, 8, 8), (8192, 64, 8, 1))
buf52 = buf51
del buf51
triton_poi_fused_convolution_relu_8[grid(32768)](buf52, primals_49,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_49
buf53 = extern_kernels.convolution(buf52, primals_50, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf53, (4, 128, 8, 8), (8192, 64, 8, 1))
buf54 = buf53
del buf53
triton_poi_fused_convolution_relu_8[grid(32768)](buf54, primals_51,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_51
buf55 = extern_kernels.convolution(buf54, primals_52, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf55, (4, 128, 8, 8), (8192, 64, 8, 1))
buf56 = buf55
del buf55
triton_poi_fused_convolution_relu_8[grid(32768)](buf56, primals_53,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_53
buf57 = extern_kernels.convolution(buf56, primals_54, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf57, (4, 128, 8, 8), (8192, 64, 8, 1))
buf58 = buf57
del buf57
triton_poi_fused_convolution_relu_8[grid(32768)](buf58, primals_55,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_55
buf59 = extern_kernels.convolution(buf58, primals_56, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf59, (4, 128, 8, 8), (8192, 64, 8, 1))
buf60 = buf59
del buf59
triton_poi_fused_convolution_relu_8[grid(32768)](buf60, primals_57,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_57
buf61 = extern_kernels.convolution(buf60, primals_58, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf61, (4, 38, 8, 8), (2432, 64, 8, 1))
buf62 = extern_kernels.convolution(buf48, primals_60, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf62, (4, 128, 8, 8), (8192, 64, 8, 1))
buf63 = buf62
del buf62
triton_poi_fused_convolution_relu_8[grid(32768)](buf63, primals_61,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_61
buf64 = extern_kernels.convolution(buf63, primals_62, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf64, (4, 128, 8, 8), (8192, 64, 8, 1))
buf65 = buf64
del buf64
triton_poi_fused_convolution_relu_8[grid(32768)](buf65, primals_63,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_63
buf66 = extern_kernels.convolution(buf65, primals_64, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf66, (4, 128, 8, 8), (8192, 64, 8, 1))
buf67 = buf66
del buf66
triton_poi_fused_convolution_relu_8[grid(32768)](buf67, primals_65,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_65
buf68 = extern_kernels.convolution(buf67, primals_66, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf68, (4, 128, 8, 8), (8192, 64, 8, 1))
buf69 = buf68
del buf68
triton_poi_fused_convolution_relu_8[grid(32768)](buf69, primals_67,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_67
buf70 = extern_kernels.convolution(buf69, primals_68, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf70, (4, 128, 8, 8), (8192, 64, 8, 1))
buf71 = buf70
del buf70
triton_poi_fused_convolution_relu_8[grid(32768)](buf71, primals_69,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_69
buf72 = extern_kernels.convolution(buf71, primals_70, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf72, (4, 128, 8, 8), (8192, 64, 8, 1))
buf73 = buf72
del buf72
triton_poi_fused_convolution_relu_8[grid(32768)](buf73, primals_71,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_71
buf74 = extern_kernels.convolution(buf73, primals_72, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf74, (4, 19, 8, 8), (1216, 64, 8, 1))
buf75 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1), torch
.float32)
triton_poi_fused_cat_9[grid(47360)](buf61, primals_59, buf74,
primals_73, buf29, buf75, 47360, XBLOCK=512, num_warps=4,
num_stages=1)
del buf61
del buf74
del primals_59
del primals_73
buf76 = extern_kernels.convolution(buf75, primals_74, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf76, (4, 128, 8, 8), (8192, 64, 8, 1))
buf77 = buf76
del buf76
triton_poi_fused_convolution_relu_8[grid(32768)](buf77, primals_75,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_75
buf78 = extern_kernels.convolution(buf77, primals_76, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf78, (4, 128, 8, 8), (8192, 64, 8, 1))
buf79 = buf78
del buf78
triton_poi_fused_convolution_relu_8[grid(32768)](buf79, primals_77,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_77
buf80 = extern_kernels.convolution(buf79, primals_78, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf80, (4, 128, 8, 8), (8192, 64, 8, 1))
buf81 = buf80
del buf80
triton_poi_fused_convolution_relu_8[grid(32768)](buf81, primals_79,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_79
buf82 = extern_kernels.convolution(buf81, primals_80, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf82, (4, 128, 8, 8), (8192, 64, 8, 1))
buf83 = buf82
del buf82
triton_poi_fused_convolution_relu_8[grid(32768)](buf83, primals_81,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_81
buf84 = extern_kernels.convolution(buf83, primals_82, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf84, (4, 128, 8, 8), (8192, 64, 8, 1))
buf85 = buf84
del buf84
triton_poi_fused_convolution_relu_8[grid(32768)](buf85, primals_83,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_83
buf86 = extern_kernels.convolution(buf85, primals_84, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf86, (4, 128, 8, 8), (8192, 64, 8, 1))
buf87 = buf86
del buf86
triton_poi_fused_convolution_relu_8[grid(32768)](buf87, primals_85,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_85
buf88 = extern_kernels.convolution(buf87, primals_86, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf88, (4, 38, 8, 8), (2432, 64, 8, 1))
buf89 = extern_kernels.convolution(buf75, primals_88, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf89, (4, 128, 8, 8), (8192, 64, 8, 1))
buf90 = buf89
del buf89
triton_poi_fused_convolution_relu_8[grid(32768)](buf90, primals_89,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_89
buf91 = extern_kernels.convolution(buf90, primals_90, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf91, (4, 128, 8, 8), (8192, 64, 8, 1))
buf92 = buf91
del buf91
triton_poi_fused_convolution_relu_8[grid(32768)](buf92, primals_91,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_91
buf93 = extern_kernels.convolution(buf92, primals_92, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf93, (4, 128, 8, 8), (8192, 64, 8, 1))
buf94 = buf93
del buf93
triton_poi_fused_convolution_relu_8[grid(32768)](buf94, primals_93,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_93
buf95 = extern_kernels.convolution(buf94, primals_94, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf95, (4, 128, 8, 8), (8192, 64, 8, 1))
buf96 = buf95
del buf95
triton_poi_fused_convolution_relu_8[grid(32768)](buf96, primals_95,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_95
buf97 = extern_kernels.convolution(buf96, primals_96, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf97, (4, 128, 8, 8), (8192, 64, 8, 1))
buf98 = buf97
del buf97
triton_poi_fused_convolution_relu_8[grid(32768)](buf98, primals_97,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_97
buf99 = extern_kernels.convolution(buf98, primals_98, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf99, (4, 128, 8, 8), (8192, 64, 8, 1))
buf100 = buf99
del buf99
triton_poi_fused_convolution_relu_8[grid(32768)](buf100, primals_99,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_99
buf101 = extern_kernels.convolution(buf100, primals_100, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf101, (4, 19, 8, 8), (1216, 64, 8, 1))
buf102 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1),
torch.float32)
triton_poi_fused_cat_9[grid(47360)](buf88, primals_87, buf101,
primals_101, buf29, buf102, 47360, XBLOCK=512, num_warps=4,
num_stages=1)
del buf101
del buf88
del primals_101
del primals_87
buf103 = extern_kernels.convolution(buf102, primals_102, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf103, (4, 128, 8, 8), (8192, 64, 8, 1))
buf104 = buf103
del buf103
triton_poi_fused_convolution_relu_8[grid(32768)](buf104,
primals_103, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_103
buf105 = extern_kernels.convolution(buf104, primals_104, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf105, (4, 128, 8, 8), (8192, 64, 8, 1))
buf106 = buf105
del buf105
triton_poi_fused_convolution_relu_8[grid(32768)](buf106,
primals_105, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_105
buf107 = extern_kernels.convolution(buf106, primals_106, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf107, (4, 128, 8, 8), (8192, 64, 8, 1))
buf108 = buf107
del buf107
triton_poi_fused_convolution_relu_8[grid(32768)](buf108,
primals_107, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_107
buf109 = extern_kernels.convolution(buf108, primals_108, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf109, (4, 128, 8, 8), (8192, 64, 8, 1))
buf110 = buf109
del buf109
triton_poi_fused_convolution_relu_8[grid(32768)](buf110,
primals_109, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_109
buf111 = extern_kernels.convolution(buf110, primals_110, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf111, (4, 128, 8, 8), (8192, 64, 8, 1))
buf112 = buf111
del buf111
triton_poi_fused_convolution_relu_8[grid(32768)](buf112,
primals_111, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_111
buf113 = extern_kernels.convolution(buf112, primals_112, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf113, (4, 128, 8, 8), (8192, 64, 8, 1))
buf114 = buf113
del buf113
triton_poi_fused_convolution_relu_8[grid(32768)](buf114,
primals_113, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_113
buf115 = extern_kernels.convolution(buf114, primals_114, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf115, (4, 38, 8, 8), (2432, 64, 8, 1))
buf116 = extern_kernels.convolution(buf102, primals_116, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf116, (4, 128, 8, 8), (8192, 64, 8, 1))
buf117 = buf116
del buf116
triton_poi_fused_convolution_relu_8[grid(32768)](buf117,
primals_117, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_117
buf118 = extern_kernels.convolution(buf117, primals_118, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf118, (4, 128, 8, 8), (8192, 64, 8, 1))
buf119 = buf118
del buf118
triton_poi_fused_convolution_relu_8[grid(32768)](buf119,
primals_119, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_119
buf120 = extern_kernels.convolution(buf119, primals_120, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf120, (4, 128, 8, 8), (8192, 64, 8, 1))
buf121 = buf120
del buf120
triton_poi_fused_convolution_relu_8[grid(32768)](buf121,
primals_121, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_121
buf122 = extern_kernels.convolution(buf121, primals_122, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf122, (4, 128, 8, 8), (8192, 64, 8, 1))
buf123 = buf122
del buf122
triton_poi_fused_convolution_relu_8[grid(32768)](buf123,
primals_123, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_123
buf124 = extern_kernels.convolution(buf123, primals_124, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf124, (4, 128, 8, 8), (8192, 64, 8, 1))
buf125 = buf124
del buf124
triton_poi_fused_convolution_relu_8[grid(32768)](buf125,
primals_125, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_125
buf126 = extern_kernels.convolution(buf125, primals_126, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf126, (4, 128, 8, 8), (8192, 64, 8, 1))
buf127 = buf126
del buf126
triton_poi_fused_convolution_relu_8[grid(32768)](buf127,
primals_127, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_127
buf128 = extern_kernels.convolution(buf127, primals_128, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf128, (4, 19, 8, 8), (1216, 64, 8, 1))
buf129 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1),
torch.float32)
triton_poi_fused_cat_9[grid(47360)](buf115, primals_115, buf128,
primals_129, buf29, buf129, 47360, XBLOCK=512, num_warps=4,
num_stages=1)
del buf115
del buf128
del primals_115
del primals_129
buf130 = extern_kernels.convolution(buf129, primals_130, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf130, (4, 128, 8, 8), (8192, 64, 8, 1))
buf131 = buf130
del buf130
triton_poi_fused_convolution_relu_8[grid(32768)](buf131,
primals_131, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_131
buf132 = extern_kernels.convolution(buf131, primals_132, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf132, (4, 128, 8, 8), (8192, 64, 8, 1))
buf133 = buf132
del buf132
triton_poi_fused_convolution_relu_8[grid(32768)](buf133,
primals_133, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_133
buf134 = extern_kernels.convolution(buf133, primals_134, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf134, (4, 128, 8, 8), (8192, 64, 8, 1))
buf135 = buf134
del buf134
triton_poi_fused_convolution_relu_8[grid(32768)](buf135,
primals_135, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_135
buf136 = extern_kernels.convolution(buf135, primals_136, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf136, (4, 128, 8, 8), (8192, 64, 8, 1))
buf137 = buf136
del buf136
triton_poi_fused_convolution_relu_8[grid(32768)](buf137,
primals_137, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_137
buf138 = extern_kernels.convolution(buf137, primals_138, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf138, (4, 128, 8, 8), (8192, 64, 8, 1))
buf139 = buf138
del buf138
triton_poi_fused_convolution_relu_8[grid(32768)](buf139,
primals_139, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_139
buf140 = extern_kernels.convolution(buf139, primals_140, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf140, (4, 128, 8, 8), (8192, 64, 8, 1))
buf141 = buf140
del buf140
triton_poi_fused_convolution_relu_8[grid(32768)](buf141,
primals_141, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_141
buf142 = extern_kernels.convolution(buf141, primals_142, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf142, (4, 38, 8, 8), (2432, 64, 8, 1))
buf143 = extern_kernels.convolution(buf129, primals_144, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf143, (4, 128, 8, 8), (8192, 64, 8, 1))
buf144 = buf143
del buf143
triton_poi_fused_convolution_relu_8[grid(32768)](buf144,
primals_145, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_145
buf145 = extern_kernels.convolution(buf144, primals_146, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf145, (4, 128, 8, 8), (8192, 64, 8, 1))
buf146 = buf145
del buf145
triton_poi_fused_convolution_relu_8[grid(32768)](buf146,
primals_147, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_147
buf147 = extern_kernels.convolution(buf146, primals_148, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf147, (4, 128, 8, 8), (8192, 64, 8, 1))
buf148 = buf147
del buf147
triton_poi_fused_convolution_relu_8[grid(32768)](buf148,
primals_149, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_149
buf149 = extern_kernels.convolution(buf148, primals_150, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf149, (4, 128, 8, 8), (8192, 64, 8, 1))
buf150 = buf149
del buf149
triton_poi_fused_convolution_relu_8[grid(32768)](buf150,
primals_151, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_151
buf151 = extern_kernels.convolution(buf150, primals_152, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf151, (4, 128, 8, 8), (8192, 64, 8, 1))
buf152 = buf151
del buf151
triton_poi_fused_convolution_relu_8[grid(32768)](buf152,
primals_153, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_153
buf153 = extern_kernels.convolution(buf152, primals_154, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf153, (4, 128, 8, 8), (8192, 64, 8, 1))
buf154 = buf153
del buf153
triton_poi_fused_convolution_relu_8[grid(32768)](buf154,
primals_155, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_155
buf155 = extern_kernels.convolution(buf154, primals_156, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf155, (4, 19, 8, 8), (1216, 64, 8, 1))
buf156 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1),
torch.float32)
triton_poi_fused_cat_9[grid(47360)](buf142, primals_143, buf155,
primals_157, buf29, buf156, 47360, XBLOCK=512, num_warps=4,
num_stages=1)
del buf142
del buf155
del primals_143
del primals_157
buf157 = extern_kernels.convolution(buf156, primals_158, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf157, (4, 128, 8, 8), (8192, 64, 8, 1))
buf158 = buf157
del buf157
triton_poi_fused_convolution_relu_8[grid(32768)](buf158,
primals_159, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_159
buf159 = extern_kernels.convolution(buf158, primals_160, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf159, (4, 128, 8, 8), (8192, 64, 8, 1))
buf160 = buf159
del buf159
triton_poi_fused_convolution_relu_8[grid(32768)](buf160,
primals_161, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_161
buf161 = extern_kernels.convolution(buf160, primals_162, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf161, (4, 128, 8, 8), (8192, 64, 8, 1))
buf162 = buf161
del buf161
triton_poi_fused_convolution_relu_8[grid(32768)](buf162,
primals_163, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_163
buf163 = extern_kernels.convolution(buf162, primals_164, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf163, (4, 128, 8, 8), (8192, 64, 8, 1))
buf164 = buf163
del buf163
triton_poi_fused_convolution_relu_8[grid(32768)](buf164,
primals_165, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_165
buf165 = extern_kernels.convolution(buf164, primals_166, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf165, (4, 128, 8, 8), (8192, 64, 8, 1))
buf166 = buf165
del buf165
triton_poi_fused_convolution_relu_8[grid(32768)](buf166,
primals_167, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_167
buf167 = extern_kernels.convolution(buf166, primals_168, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf167, (4, 128, 8, 8), (8192, 64, 8, 1))
buf168 = buf167
del buf167
triton_poi_fused_convolution_relu_8[grid(32768)](buf168,
primals_169, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_169
buf169 = extern_kernels.convolution(buf168, primals_170, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf169, (4, 38, 8, 8), (2432, 64, 8, 1))
buf170 = buf169
del buf169
triton_poi_fused_convolution_10[grid(9728)](buf170, primals_171,
9728, XBLOCK=256, num_warps=4, num_stages=1)
del primals_171
buf171 = extern_kernels.convolution(buf156, primals_172, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf171, (4, 128, 8, 8), (8192, 64, 8, 1))
buf172 = buf171
del buf171
triton_poi_fused_convolution_relu_8[grid(32768)](buf172,
primals_173, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_173
buf173 = extern_kernels.convolution(buf172, primals_174, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf173, (4, 128, 8, 8), (8192, 64, 8, 1))
buf174 = buf173
del buf173
triton_poi_fused_convolution_relu_8[grid(32768)](buf174,
primals_175, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_175
buf175 = extern_kernels.convolution(buf174, primals_176, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf175, (4, 128, 8, 8), (8192, 64, 8, 1))
buf176 = buf175
del buf175
triton_poi_fused_convolution_relu_8[grid(32768)](buf176,
primals_177, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_177
buf177 = extern_kernels.convolution(buf176, primals_178, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf177, (4, 128, 8, 8), (8192, 64, 8, 1))
buf178 = buf177
del buf177
triton_poi_fused_convolution_relu_8[grid(32768)](buf178,
primals_179, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_179
buf179 = extern_kernels.convolution(buf178, primals_180, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf179, (4, 128, 8, 8), (8192, 64, 8, 1))
buf180 = buf179
del buf179
triton_poi_fused_convolution_relu_8[grid(32768)](buf180,
primals_181, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_181
buf181 = extern_kernels.convolution(buf180, primals_182, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf181, (4, 128, 8, 8), (8192, 64, 8, 1))
buf182 = buf181
del buf181
triton_poi_fused_convolution_relu_8[grid(32768)](buf182,
primals_183, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_183
buf183 = extern_kernels.convolution(buf182, primals_184, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf183, (4, 19, 8, 8), (1216, 64, 8, 1))
buf184 = buf183
del buf183
buf185 = empty_strided_cuda((4, 19, 8, 8), (1280, 64, 8, 1), torch.bool
)
triton_poi_fused_convolution_relu_threshold_backward_11[grid(4864)](
buf184, primals_185, buf185, 4864, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_185
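    # The first two tensors returned below are the stage-6 outputs of the two
    # branches: buf170 has 38 channels (the L1 branch, part-affinity fields in
    # the OpenPose layout) and buf184 has 19 channels (the L2 branch, keypoint
    # confidence maps). Everything after them is kept only so autograd can run
    # the backward pass.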
return (buf170, buf184, primals_1, primals_3, primals_4, primals_6,
primals_8, primals_10, primals_12, primals_14, primals_16,
primals_18, primals_20, primals_22, primals_24, primals_26,
primals_28, primals_30, primals_32, primals_34, primals_36,
primals_38, primals_40, primals_42, primals_44, primals_46,
primals_48, primals_50, primals_52, primals_54, primals_56,
primals_58, primals_60, primals_62, primals_64, primals_66,
primals_68, primals_70, primals_72, primals_74, primals_76,
primals_78, primals_80, primals_82, primals_84, primals_86,
primals_88, primals_90, primals_92, primals_94, primals_96,
primals_98, primals_100, primals_102, primals_104, primals_106,
primals_108, primals_110, primals_112, primals_114, primals_116,
primals_118, primals_120, primals_122, primals_124, primals_126,
primals_128, primals_130, primals_132, primals_134, primals_136,
primals_138, primals_140, primals_142, primals_144, primals_146,
primals_148, primals_150, primals_152, primals_154, primals_156,
primals_158, primals_160, primals_162, primals_164, primals_166,
primals_168, primals_170, primals_172, primals_174, primals_176,
primals_178, primals_180, primals_182, primals_184, buf1, buf3,
buf4, buf5, buf7, buf9, buf10, buf11, buf13, buf15, buf17, buf19,
buf20, buf21, buf23, buf25, buf27, buf29, buf31, buf33, buf35,
buf37, buf40, buf42, buf44, buf46, buf48, buf50, buf52, buf54,
buf56, buf58, buf60, buf63, buf65, buf67, buf69, buf71, buf73,
buf75, buf77, buf79, buf81, buf83, buf85, buf87, buf90, buf92,
buf94, buf96, buf98, buf100, buf102, buf104, buf106, buf108, buf110,
buf112, buf114, buf117, buf119, buf121, buf123, buf125, buf127,
buf129, buf131, buf133, buf135, buf137, buf139, buf141, buf144,
buf146, buf148, buf150, buf152, buf154, buf156, buf158, buf160,
buf162, buf164, buf166, buf168, buf172, buf174, buf176, buf178,
buf180, buf182, buf185)
def make_layers(block, no_relu_layers):
layers = []
for layer_name, v in block.items():
if 'pool' in layer_name:
layer = nn.MaxPool2d(kernel_size=v[0], stride=v[1], padding=v[2])
layers.append((layer_name, layer))
else:
conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1],
kernel_size=v[2], stride=v[3], padding=v[4])
layers.append((layer_name, conv2d))
if layer_name not in no_relu_layers:
layers.append(('relu_' + layer_name, nn.ReLU(inplace=True)))
return nn.Sequential(OrderedDict(layers))
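# A minimal sketch (hypothetical helper, not part of the exported graph) of how
# make_layers consumes a block definition: each conv entry is
# [in_channels, out_channels, kernel_size, stride, padding], each pool entry is
# [kernel_size, stride, padding], and any layer not listed in no_relu_layers is
# followed by an in-place ReLU.
def _make_layers_demo():
    demo_block = OrderedDict({'demo_conv1': [3, 64, 3, 1, 1], 'demo_pool': [
        2, 2, 0], 'demo_conv2': [64, 38, 1, 1, 0]})
    return make_layers(demo_block, ['demo_conv2'])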
class bodypose_modelNew(nn.Module):
def __init__(self):
super(bodypose_modelNew, self).__init__()
no_relu_layers = ['conv5_5_CPM_L1', 'conv5_5_CPM_L2',
'Mconv7_stage2_L1', 'Mconv7_stage2_L2', 'Mconv7_stage3_L1',
'Mconv7_stage3_L2', 'Mconv7_stage4_L1', 'Mconv7_stage4_L2',
'Mconv7_stage5_L1', 'Mconv7_stage5_L2', 'Mconv7_stage6_L1',
'Mconv7_stage6_L1']
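        # Note: 'Mconv7_stage6_L1' appears twice and 'Mconv7_stage6_L2' is not
        # listed, so the stage-6 L2 output layer keeps its ReLU; the generated
        # call() above matches this (its final fused kernel applies a ReLU to
        # buf184).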
blocks = {}
block0 = OrderedDict({'conv1_1': [3, 64, 3, 1, 1], 'conv1_2': [64,
64, 3, 1, 1], 'pool1_stage1': [2, 2, 0], 'conv2_1': [64, 128, 3,
1, 1], 'conv2_2': [128, 128, 3, 1, 1], 'pool2_stage1': [2, 2, 0
], 'conv3_1': [128, 256, 3, 1, 1], 'conv3_2': [256, 256, 3, 1,
1], 'conv3_3': [256, 256, 3, 1, 1], 'conv3_4': [256, 256, 3, 1,
1], 'pool3_stage1': [2, 2, 0], 'conv4_1': [256, 512, 3, 1, 1],
'conv4_2': [512, 512, 3, 1, 1], 'conv4_3_CPM': [512, 256, 3, 1,
1], 'conv4_4_CPM': [256, 128, 3, 1, 1]})
block1_1 = OrderedDict({'conv5_1_CPM_L1': [128, 128, 3, 1, 1],
'conv5_2_CPM_L1': [128, 128, 3, 1, 1], 'conv5_3_CPM_L1': [128,
128, 3, 1, 1], 'conv5_4_CPM_L1': [128, 512, 1, 1, 0],
'conv5_5_CPM_L1': [512, 38, 1, 1, 0]})
block1_2 = OrderedDict({'conv5_1_CPM_L2': [128, 128, 3, 1, 1],
'conv5_2_CPM_L2': [128, 128, 3, 1, 1], 'conv5_3_CPM_L2': [128,
128, 3, 1, 1], 'conv5_4_CPM_L2': [128, 512, 1, 1, 0],
'conv5_5_CPM_L2': [512, 19, 1, 1, 0]})
blocks['block1_1'] = block1_1
blocks['block1_2'] = block1_2
self.model0 = make_layers(block0, no_relu_layers)
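        # Stages 2-6 refine the stage-1 predictions: each stage consumes the
        # 185-channel concatenation of the previous 38- and 19-channel outputs
        # with the 128-channel backbone feature map (38 + 19 + 128 = 185).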
for i in range(2, 7):
blocks['block%d_1' % i] = OrderedDict({('Mconv1_stage%d_L1' % i
): [185, 128, 7, 1, 3], ('Mconv2_stage%d_L1' % i): [128,
128, 7, 1, 3], ('Mconv3_stage%d_L1' % i): [128, 128, 7, 1,
3], ('Mconv4_stage%d_L1' % i): [128, 128, 7, 1, 3], (
'Mconv5_stage%d_L1' % i): [128, 128, 7, 1, 3], (
'Mconv6_stage%d_L1' % i): [128, 128, 1, 1, 0], (
'Mconv7_stage%d_L1' % i): [128, 38, 1, 1, 0]})
blocks['block%d_2' % i] = OrderedDict({('Mconv1_stage%d_L2' % i
): [185, 128, 7, 1, 3], ('Mconv2_stage%d_L2' % i): [128,
128, 7, 1, 3], ('Mconv3_stage%d_L2' % i): [128, 128, 7, 1,
3], ('Mconv4_stage%d_L2' % i): [128, 128, 7, 1, 3], (
'Mconv5_stage%d_L2' % i): [128, 128, 7, 1, 3], (
'Mconv6_stage%d_L2' % i): [128, 128, 1, 1, 0], (
'Mconv7_stage%d_L2' % i): [128, 19, 1, 1, 0]})
for k in blocks.keys():
blocks[k] = make_layers(blocks[k], no_relu_layers)
self.model1_1 = blocks['block1_1']
self.model2_1 = blocks['block2_1']
self.model3_1 = blocks['block3_1']
self.model4_1 = blocks['block4_1']
self.model5_1 = blocks['block5_1']
self.model6_1 = blocks['block6_1']
self.model1_2 = blocks['block1_2']
self.model2_2 = blocks['block2_2']
self.model3_2 = blocks['block3_2']
self.model4_2 = blocks['block4_2']
self.model5_2 = blocks['block5_2']
self.model6_2 = blocks['block6_2']
def forward(self, input_0):
primals_1 = self.model0.conv1_1.weight
primals_2 = self.model0.conv1_1.bias
primals_4 = self.model0.conv1_2.weight
primals_5 = self.model0.conv1_2.bias
primals_6 = self.model0.conv2_1.weight
primals_7 = self.model0.conv2_1.bias
primals_8 = self.model0.conv2_2.weight
primals_9 = self.model0.conv2_2.bias
primals_10 = self.model0.conv3_1.weight
primals_11 = self.model0.conv3_1.bias
primals_12 = self.model0.conv3_2.weight
primals_13 = self.model0.conv3_2.bias
primals_14 = self.model0.conv3_3.weight
primals_15 = self.model0.conv3_3.bias
primals_16 = self.model0.conv3_4.weight
primals_17 = self.model0.conv3_4.bias
primals_18 = self.model0.conv4_1.weight
primals_19 = self.model0.conv4_1.bias
primals_20 = self.model0.conv4_2.weight
primals_21 = self.model0.conv4_2.bias
primals_22 = self.model0.conv4_3_CPM.weight
primals_23 = self.model0.conv4_3_CPM.bias
primals_24 = self.model0.conv4_4_CPM.weight
primals_25 = self.model0.conv4_4_CPM.bias
primals_26 = self.model1_1.conv5_1_CPM_L1.weight
primals_27 = self.model1_1.conv5_1_CPM_L1.bias
primals_28 = self.model1_1.conv5_2_CPM_L1.weight
primals_29 = self.model1_1.conv5_2_CPM_L1.bias
primals_30 = self.model1_1.conv5_3_CPM_L1.weight
primals_31 = self.model1_1.conv5_3_CPM_L1.bias
primals_32 = self.model1_1.conv5_4_CPM_L1.weight
primals_33 = self.model1_1.conv5_4_CPM_L1.bias
primals_34 = self.model1_1.conv5_5_CPM_L1.weight
primals_35 = self.model1_1.conv5_5_CPM_L1.bias
        primals_46 = self.model2_1.Mconv1_stage2_L1.weight
        primals_47 = self.model2_1.Mconv1_stage2_L1.bias
        primals_48 = self.model2_1.Mconv2_stage2_L1.weight
        primals_49 = self.model2_1.Mconv2_stage2_L1.bias
        primals_50 = self.model2_1.Mconv3_stage2_L1.weight
        primals_51 = self.model2_1.Mconv3_stage2_L1.bias
        primals_52 = self.model2_1.Mconv4_stage2_L1.weight
        primals_53 = self.model2_1.Mconv4_stage2_L1.bias
        primals_54 = self.model2_1.Mconv5_stage2_L1.weight
        primals_55 = self.model2_1.Mconv5_stage2_L1.bias
        primals_56 = self.model2_1.Mconv6_stage2_L1.weight
        primals_57 = self.model2_1.Mconv6_stage2_L1.bias
        primals_58 = self.model2_1.Mconv7_stage2_L1.weight
        primals_59 = self.model2_1.Mconv7_stage2_L1.bias
        primals_74 = self.model3_1.Mconv1_stage3_L1.weight
        primals_75 = self.model3_1.Mconv1_stage3_L1.bias
        primals_76 = self.model3_1.Mconv2_stage3_L1.weight
        primals_77 = self.model3_1.Mconv2_stage3_L1.bias
        primals_78 = self.model3_1.Mconv3_stage3_L1.weight
        primals_79 = self.model3_1.Mconv3_stage3_L1.bias
        primals_80 = self.model3_1.Mconv4_stage3_L1.weight
        primals_81 = self.model3_1.Mconv4_stage3_L1.bias
        primals_82 = self.model3_1.Mconv5_stage3_L1.weight
        primals_83 = self.model3_1.Mconv5_stage3_L1.bias
        primals_84 = self.model3_1.Mconv6_stage3_L1.weight
        primals_85 = self.model3_1.Mconv6_stage3_L1.bias
        primals_86 = self.model3_1.Mconv7_stage3_L1.weight
        primals_87 = self.model3_1.Mconv7_stage3_L1.bias
        primals_102 = self.model4_1.Mconv1_stage4_L1.weight
        primals_103 = self.model4_1.Mconv1_stage4_L1.bias
        primals_104 = self.model4_1.Mconv2_stage4_L1.weight
        primals_105 = self.model4_1.Mconv2_stage4_L1.bias
        primals_106 = self.model4_1.Mconv3_stage4_L1.weight
        primals_107 = self.model4_1.Mconv3_stage4_L1.bias
        primals_108 = self.model4_1.Mconv4_stage4_L1.weight
        primals_109 = self.model4_1.Mconv4_stage4_L1.bias
        primals_110 = self.model4_1.Mconv5_stage4_L1.weight
        primals_111 = self.model4_1.Mconv5_stage4_L1.bias
        primals_112 = self.model4_1.Mconv6_stage4_L1.weight
        primals_113 = self.model4_1.Mconv6_stage4_L1.bias
        primals_114 = self.model4_1.Mconv7_stage4_L1.weight
        primals_115 = self.model4_1.Mconv7_stage4_L1.bias
        primals_130 = self.model5_1.Mconv1_stage5_L1.weight
        primals_131 = self.model5_1.Mconv1_stage5_L1.bias
        primals_132 = self.model5_1.Mconv2_stage5_L1.weight
        primals_133 = self.model5_1.Mconv2_stage5_L1.bias
        primals_134 = self.model5_1.Mconv3_stage5_L1.weight
        primals_135 = self.model5_1.Mconv3_stage5_L1.bias
        primals_136 = self.model5_1.Mconv4_stage5_L1.weight
        primals_137 = self.model5_1.Mconv4_stage5_L1.bias
        primals_138 = self.model5_1.Mconv5_stage5_L1.weight
        primals_139 = self.model5_1.Mconv5_stage5_L1.bias
        primals_140 = self.model5_1.Mconv6_stage5_L1.weight
        primals_141 = self.model5_1.Mconv6_stage5_L1.bias
        primals_142 = self.model5_1.Mconv7_stage5_L1.weight
        primals_143 = self.model5_1.Mconv7_stage5_L1.bias
        primals_158 = self.model6_1.Mconv1_stage6_L1.weight
        primals_159 = self.model6_1.Mconv1_stage6_L1.bias
        primals_160 = self.model6_1.Mconv2_stage6_L1.weight
        primals_161 = self.model6_1.Mconv2_stage6_L1.bias
        primals_162 = self.model6_1.Mconv3_stage6_L1.weight
        primals_163 = self.model6_1.Mconv3_stage6_L1.bias
        primals_164 = self.model6_1.Mconv4_stage6_L1.weight
        primals_165 = self.model6_1.Mconv4_stage6_L1.bias
        primals_166 = self.model6_1.Mconv5_stage6_L1.weight
        primals_167 = self.model6_1.Mconv5_stage6_L1.bias
        primals_168 = self.model6_1.Mconv6_stage6_L1.weight
        primals_169 = self.model6_1.Mconv6_stage6_L1.bias
        primals_170 = self.model6_1.Mconv7_stage6_L1.weight
        primals_171 = self.model6_1.Mconv7_stage6_L1.bias
        primals_36 = self.model1_2.conv5_1_CPM_L2.weight
        primals_37 = self.model1_2.conv5_1_CPM_L2.bias
        primals_38 = self.model1_2.conv5_2_CPM_L2.weight
        primals_39 = self.model1_2.conv5_2_CPM_L2.bias
        primals_40 = self.model1_2.conv5_3_CPM_L2.weight
        primals_41 = self.model1_2.conv5_3_CPM_L2.bias
        primals_42 = self.model1_2.conv5_4_CPM_L2.weight
        primals_43 = self.model1_2.conv5_4_CPM_L2.bias
        primals_44 = self.model1_2.conv5_5_CPM_L2.weight
        primals_45 = self.model1_2.conv5_5_CPM_L2.bias
        primals_60 = self.model2_2.Mconv1_stage2_L2.weight
        primals_61 = self.model2_2.Mconv1_stage2_L2.bias
        primals_62 = self.model2_2.Mconv2_stage2_L2.weight
        primals_63 = self.model2_2.Mconv2_stage2_L2.bias
        primals_64 = self.model2_2.Mconv3_stage2_L2.weight
        primals_65 = self.model2_2.Mconv3_stage2_L2.bias
        primals_66 = self.model2_2.Mconv4_stage2_L2.weight
        primals_67 = self.model2_2.Mconv4_stage2_L2.bias
        primals_68 = self.model2_2.Mconv5_stage2_L2.weight
        primals_69 = self.model2_2.Mconv5_stage2_L2.bias
        primals_70 = self.model2_2.Mconv6_stage2_L2.weight
        primals_71 = self.model2_2.Mconv6_stage2_L2.bias
        primals_72 = self.model2_2.Mconv7_stage2_L2.weight
        primals_73 = self.model2_2.Mconv7_stage2_L2.bias
        primals_88 = self.model3_2.Mconv1_stage3_L2.weight
        primals_89 = self.model3_2.Mconv1_stage3_L2.bias
        primals_90 = self.model3_2.Mconv2_stage3_L2.weight
        primals_91 = self.model3_2.Mconv2_stage3_L2.bias
        primals_92 = self.model3_2.Mconv3_stage3_L2.weight
        primals_93 = self.model3_2.Mconv3_stage3_L2.bias
        primals_94 = self.model3_2.Mconv4_stage3_L2.weight
        primals_95 = self.model3_2.Mconv4_stage3_L2.bias
        primals_96 = self.model3_2.Mconv5_stage3_L2.weight
        primals_97 = self.model3_2.Mconv5_stage3_L2.bias
        primals_98 = self.model3_2.Mconv6_stage3_L2.weight
        primals_99 = self.model3_2.Mconv6_stage3_L2.bias
        primals_100 = self.model3_2.Mconv7_stage3_L2.weight
        primals_101 = self.model3_2.Mconv7_stage3_L2.bias
        primals_116 = self.model4_2.Mconv1_stage4_L2.weight
        primals_117 = self.model4_2.Mconv1_stage4_L2.bias
        primals_118 = self.model4_2.Mconv2_stage4_L2.weight
        primals_119 = self.model4_2.Mconv2_stage4_L2.bias
        primals_120 = self.model4_2.Mconv3_stage4_L2.weight
        primals_121 = self.model4_2.Mconv3_stage4_L2.bias
        primals_122 = self.model4_2.Mconv4_stage4_L2.weight
        primals_123 = self.model4_2.Mconv4_stage4_L2.bias
        primals_124 = self.model4_2.Mconv5_stage4_L2.weight
        primals_125 = self.model4_2.Mconv5_stage4_L2.bias
        primals_126 = self.model4_2.Mconv6_stage4_L2.weight
        primals_127 = self.model4_2.Mconv6_stage4_L2.bias
        primals_128 = self.model4_2.Mconv7_stage4_L2.weight
        primals_129 = self.model4_2.Mconv7_stage4_L2.bias
        primals_144 = self.model5_2.Mconv1_stage5_L2.weight
        primals_145 = self.model5_2.Mconv1_stage5_L2.bias
        primals_146 = self.model5_2.Mconv2_stage5_L2.weight
        primals_147 = self.model5_2.Mconv2_stage5_L2.bias
        primals_148 = self.model5_2.Mconv3_stage5_L2.weight
        primals_149 = self.model5_2.Mconv3_stage5_L2.bias
        primals_150 = self.model5_2.Mconv4_stage5_L2.weight
        primals_151 = self.model5_2.Mconv4_stage5_L2.bias
        primals_152 = self.model5_2.Mconv5_stage5_L2.weight
        primals_153 = self.model5_2.Mconv5_stage5_L2.bias
        primals_154 = self.model5_2.Mconv6_stage5_L2.weight
        primals_155 = self.model5_2.Mconv6_stage5_L2.bias
        primals_156 = self.model5_2.Mconv7_stage5_L2.weight
        primals_157 = self.model5_2.Mconv7_stage5_L2.bias
primals_172 = self.model6_2.Mconv1_stage6_L2.weight
primals_173 = self.model6_2.Mconv1_stage6_L2.bias
primals_174 = self.model6_2.Mconv2_stage6_L2.weight
primals_175 = self.model6_2.Mconv2_stage6_L2.bias
primals_176 = self.model6_2.Mconv3_stage6_L2.weight
primals_177 = self.model6_2.Mconv3_stage6_L2.bias
primals_178 = self.model6_2.Mconv4_stage6_L2.weight
primals_179 = self.model6_2.Mconv4_stage6_L2.bias
primals_180 = self.model6_2.Mconv5_stage6_L2.weight
primals_181 = self.model6_2.Mconv5_stage6_L2.bias
primals_182 = self.model6_2.Mconv6_stage6_L2.weight
primals_183 = self.model6_2.Mconv6_stage6_L2.bias
primals_184 = self.model6_2.Mconv7_stage6_L2.weight
primals_185 = self.model6_2.Mconv7_stage6_L2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45, primals_46, primals_47, primals_48, primals_49,
primals_50, primals_51, primals_52, primals_53, primals_54,
primals_55, primals_56, primals_57, primals_58, primals_59,
primals_60, primals_61, primals_62, primals_63, primals_64,
primals_65, primals_66, primals_67, primals_68, primals_69,
primals_70, primals_71, primals_72, primals_73, primals_74,
primals_75, primals_76, primals_77, primals_78, primals_79,
primals_80, primals_81, primals_82, primals_83, primals_84,
primals_85, primals_86, primals_87, primals_88, primals_89,
primals_90, primals_91, primals_92, primals_93, primals_94,
primals_95, primals_96, primals_97, primals_98, primals_99,
primals_100, primals_101, primals_102, primals_103, primals_104,
primals_105, primals_106, primals_107, primals_108, primals_109,
primals_110, primals_111, primals_112, primals_113, primals_114,
primals_115, primals_116, primals_117, primals_118, primals_119,
primals_120, primals_121, primals_122, primals_123, primals_124,
primals_125, primals_126, primals_127, primals_128, primals_129,
primals_130, primals_131, primals_132, primals_133, primals_134,
primals_135, primals_136, primals_137, primals_138, primals_139,
primals_140, primals_141, primals_142, primals_143, primals_144,
primals_145, primals_146, primals_147, primals_148, primals_149,
primals_150, primals_151, primals_152, primals_153, primals_154,
primals_155, primals_156, primals_157, primals_158, primals_159,
primals_160, primals_161, primals_162, primals_163, primals_164,
primals_165, primals_166, primals_167, primals_168, primals_169,
primals_170, primals_171, primals_172, primals_173, primals_174,
primals_175, primals_176, primals_177, primals_178, primals_179,
primals_180, primals_181, primals_182, primals_183, primals_184,
primals_185])
return output[0], output[1]
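def _bodypose_demo():
    # Hypothetical usage sketch (not part of the exported graph): the compiled
    # forward expects a (4, 3, 64, 64) CUDA tensor and returns the stage-6
    # 38-channel and 19-channel maps at 1/8 resolution, i.e. shapes
    # (4, 38, 8, 8) and (4, 19, 8, 8).
    model = bodypose_modelNew().cuda()
    out_l1, out_l2 = model(torch.rand(4, 3, 64, 64, device='cuda'))
    return out_l1.shape, out_l2.shape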
|
KamaljeetSahoo/6thSense
|
bodypose_model
| false
| 9,367
|
[
"Unlicense",
"MIT"
] | 0
|
db1f2cd2bb7858410c128a6d11cfbdf8ea69e691
|
https://github.com/KamaljeetSahoo/6thSense/tree/db1f2cd2bb7858410c128a6d11cfbdf8ea69e691
|
BinaryExpSquare
|
import abc
import inspect
import torch
import warnings
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import Any
from typing import *
from pathlib import Path
from collections import defaultdict
def get_module_name(cls_or_func):
module_name = cls_or_func.__module__
if module_name == '__main__':
for frm in inspect.stack():
if inspect.getmodule(frm[0]).__name__ == '__main__':
main_file_path = Path(inspect.getsourcefile(frm[0]))
if not Path().samefile(main_file_path.parent):
raise RuntimeError(
f'You are using "{main_file_path}" to launch your experiment, please launch the experiment under the directory where "{main_file_path.name}" is located.'
)
module_name = main_file_path.stem
break
if module_name == '__main__':
warnings.warn(
'Callstack exhausted but main module still not found. This will probably cause issues that the function/class cannot be imported.'
)
if (f'{cls_or_func.__module__}.{cls_or_func.__name__}' ==
'torch.nn.modules.rnn.LSTM'):
module_name = cls_or_func.__module__
return module_name
# Per-namespace uid counters used by reset_uid (defined here so the helpers
# below are self-contained).
_last_uid = defaultdict(int)
def reset_uid(namespace: 'str'='default') ->None:
    _last_uid[namespace] = 0
def _create_wrapper_cls(cls, store_init_parameters=True, reset_mutation_uid
=False, stop_parsing=True):
class wrapper(cls):
def __init__(self, *args, **kwargs):
self._stop_parsing = stop_parsing
if reset_mutation_uid:
reset_uid('mutation')
if store_init_parameters:
argname_list = list(inspect.signature(cls.__init__).
parameters.keys())[1:]
full_args = {}
full_args.update(kwargs)
assert len(args) <= len(argname_list
), f'Length of {args} is greater than length of {argname_list}.'
for argname, value in zip(argname_list, args):
full_args[argname] = value
args = list(args)
for i, value in enumerate(args):
if isinstance(value, Translatable):
args[i] = value._translate()
for i, value in kwargs.items():
if isinstance(value, Translatable):
kwargs[i] = value._translate()
self._init_parameters = full_args
else:
self._init_parameters = {}
super().__init__(*args, **kwargs)
wrapper.__module__ = get_module_name(cls)
wrapper.__name__ = cls.__name__
wrapper.__qualname__ = cls.__qualname__
wrapper.__init__.__doc__ = cls.__init__.__doc__
return wrapper
def serialize_cls(cls):
"""
    To create a serializable class.
"""
return _create_wrapper_cls(cls)
def basic_unit(cls):
"""
    To wrap a module as a basic unit, to stop it from being parsed further and make it mutate-able.
"""
import torch.nn as nn
assert issubclass(cls, nn.Module
), 'When using @basic_unit, the class must be a subclass of nn.Module.'
return serialize_cls(cls)
class Translatable(abc.ABC):
"""
    Inherit this class and implement ``_translate`` when the inner class needs a different
parameter from the wrapper class in its init function.
"""
@abc.abstractmethod
def _translate(self) ->Any:
pass
@basic_unit
class BinaryExpSquare(nn.Module):
def __init__(self):
super().__init__()
self.beta = torch.nn.Parameter(torch.tensor(1, dtype=torch.float32))
def forward(self, x):
return torch.exp(-self.beta * torch.square(x[0] - x[1]))
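def _binary_exp_square_demo():
    # Illustrative sketch (not part of the extracted module): with beta left at
    # its initial value of 1, feeding identical slices gives exp(-1 * 0) == 1
    # for every element, so the result is an all-ones tensor of shape (4, 4, 4).
    m = BinaryExpSquare()
    x = torch.ones(4, 4, 4, 4)
    return m(x)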
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import abc
import inspect
import warnings
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import Any
from typing import *
from pathlib import Path
from collections import defaultdict
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_exp_mul_neg_pow_sub_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = -tmp5
tmp7 = tmp6 * tmp3
tmp8 = tl_math.exp(tmp7)
tl.store(out_ptr0 + x0, tmp3, xmask)
tl.store(out_ptr1 + x0, tmp8, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_exp_mul_neg_pow_sub_0[grid(64)](primals_2,
primals_1, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_1
del primals_2
return buf1, buf0, buf1
def get_module_name(cls_or_func):
module_name = cls_or_func.__module__
if module_name == '__main__':
for frm in inspect.stack():
if inspect.getmodule(frm[0]).__name__ == '__main__':
main_file_path = Path(inspect.getsourcefile(frm[0]))
if not Path().samefile(main_file_path.parent):
raise RuntimeError(
f'You are using "{main_file_path}" to launch your experiment, please launch the experiment under the directory where "{main_file_path.name}" is located.'
)
module_name = main_file_path.stem
break
if module_name == '__main__':
warnings.warn(
'Callstack exhausted but main module still not found. This will probably cause issues that the function/class cannot be imported.'
)
if (f'{cls_or_func.__module__}.{cls_or_func.__name__}' ==
'torch.nn.modules.rnn.LSTM'):
module_name = cls_or_func.__module__
return module_name
_last_uid = defaultdict(int)  # assumed module-level uid store; not defined in the original snippet
def reset_uid(namespace: 'str'='default') ->None:
    _last_uid[namespace] = 0
def _create_wrapper_cls(cls, store_init_parameters=True, reset_mutation_uid
=False, stop_parsing=True):
class wrapper(cls):
def __init__(self, *args, **kwargs):
self._stop_parsing = stop_parsing
if reset_mutation_uid:
reset_uid('mutation')
if store_init_parameters:
argname_list = list(inspect.signature(cls.__init__).
parameters.keys())[1:]
full_args = {}
full_args.update(kwargs)
assert len(args) <= len(argname_list
), f'Length of {args} is greater than length of {argname_list}.'
for argname, value in zip(argname_list, args):
full_args[argname] = value
args = list(args)
for i, value in enumerate(args):
if isinstance(value, Translatable):
args[i] = value._translate()
for i, value in kwargs.items():
if isinstance(value, Translatable):
kwargs[i] = value._translate()
self._init_parameters = full_args
else:
self._init_parameters = {}
super().__init__(*args, **kwargs)
wrapper.__module__ = get_module_name(cls)
wrapper.__name__ = cls.__name__
wrapper.__qualname__ = cls.__qualname__
wrapper.__init__.__doc__ = cls.__init__.__doc__
return wrapper
def serialize_cls(cls):
"""
    To create a serializable class.
"""
return _create_wrapper_cls(cls)
def basic_unit(cls):
"""
    To wrap a module as a basic unit, so that it is not parsed further and can be mutated as a whole.
"""
import torch.nn as nn
assert issubclass(cls, nn.Module
), 'When using @basic_unit, the class must be a subclass of nn.Module.'
return serialize_cls(cls)
class Translatable(abc.ABC):
"""
    Inherit this class and implement ``_translate`` when the inner class needs a different
parameter from the wrapper class in its init function.
"""
@abc.abstractmethod
def _translate(self) ->Any:
pass
@basic_unit
class BinaryExpSquareNew(nn.Module):
def __init__(self):
super().__init__()
self.beta = torch.nn.Parameter(torch.tensor(1, dtype=torch.float32))
def forward(self, input_0):
primals_1 = self.beta
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
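# --- Hedged equivalence sketch (assumes a CUDA device; added for illustration) ---
# The Triton-compiled BinaryExpSquareNew should reproduce the eager computation
# exp(-beta * (x[0] - x[1]) ** 2) up to float32 tolerance.
if __name__ == '__main__' and torch.cuda.is_available():
    m = BinaryExpSquareNew().cuda()
    x = torch.rand([4, 4, 4, 4], device='cuda')
    out = m(x)
    ref = torch.exp(-m.beta * torch.square(x[0] - x[1]))
    print(torch.allclose(out, ref, atol=1e-06))  # expected: True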
|
Johnsonms/NNI_master
|
BinaryExpSquare
| false
| 11,584
|
[
"MIT"
] | 0
|
e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
https://github.com/Johnsonms/NNI_master/tree/e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
CRFOutputLayer
|
import torch
import torch.nn as nn
class CRF(nn.Module):
"""
Implements Conditional Random Fields that can be trained via
backpropagation.
"""
def __init__(self, num_tags):
super(CRF, self).__init__()
self.num_tags = num_tags
self.transitions = nn.Parameter(torch.Tensor(num_tags, num_tags))
self.start_transitions = nn.Parameter(torch.randn(num_tags))
self.stop_transitions = nn.Parameter(torch.randn(num_tags))
nn.init.xavier_normal_(self.transitions)
def forward(self, feats):
if len(feats.shape) != 3:
            raise ValueError('feats must be 3-d but got {}-d'.format(len(feats.shape)))
return self._viterbi(feats)
def loss(self, feats, tags):
"""
Computes negative log likelihood between features and tags.
        Essentially the difference between individual sequence scores and the
        sum of all possible sequence scores (the partition function).
Parameters:
feats: Input features [batch size, sequence length, number of tags]
tags: Target tag indices [batch size, sequence length]. Should be between
0 and num_tags
Returns:
Negative log likelihood [a scalar]
"""
        if len(feats.shape) != 3:
            raise ValueError('feats must be 3-d but got {}-d'.format(len(feats.shape)))
        if len(tags.shape) != 2:
            raise ValueError('tags must be 2-d but got {}-d'.format(len(tags.shape)))
if feats.shape[:2] != tags.shape:
raise ValueError(
'First two dimensions of feats and tags must match')
sequence_score = self._sequence_score(feats, tags)
partition_function = self._partition_function(feats)
log_probability = sequence_score - partition_function
return -log_probability.mean()
def _sequence_score(self, feats, tags):
"""
Parameters:
feats: Input features [batch size, sequence length, number of tags]
tags: Target tag indices [batch size, sequence length]. Should be between
0 and num_tags
Returns: Sequence score of shape [batch size]
"""
        feats.shape[0]  # presumably intended as the batch size; unused, kept as a no-op from the original
feat_score = feats.gather(2, tags.unsqueeze(-1)).squeeze(-1).sum(dim=-1
)
tags_pairs = tags.unfold(1, 2, 1)
indices = tags_pairs.permute(2, 0, 1).chunk(2)
trans_score = self.transitions[indices].squeeze(0).sum(dim=-1)
start_score = self.start_transitions[tags[:, 0]]
stop_score = self.stop_transitions[tags[:, -1]]
return feat_score + start_score + trans_score + stop_score
def _partition_function(self, feats):
"""
        Computes the partition function for the CRF using the forward algorithm:
        the (log-space) sum of scores over all possible tag sequences for
        the given feature vector sequence.
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns:
Total scores of shape [batch size]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
raise ValueError('num_tags should be {} but got {}'.format(self
.num_tags, num_tags))
a = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
for i in range(1, seq_size):
feat = feats[:, i].unsqueeze(1)
a = self._log_sum_exp(a.unsqueeze(-1) + transitions + feat, 1)
return self._log_sum_exp(a + self.stop_transitions.unsqueeze(0), 1)
def _viterbi(self, feats):
"""
Uses Viterbi algorithm to predict the best sequence
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns: Best tag sequence [batch size, sequence length]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
raise ValueError('num_tags should be {} but got {}'.format(self
.num_tags, num_tags))
v = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
paths = []
for i in range(1, seq_size):
feat = feats[:, i]
v, idx = (v.unsqueeze(-1) + transitions).max(1)
paths.append(idx)
v = v + feat
v, tag = (v + self.stop_transitions.unsqueeze(0)).max(1, True)
tags = [tag]
for idx in reversed(paths):
tag = idx.gather(1, tag)
tags.append(tag)
tags.reverse()
return torch.cat(tags, 1)
def _log_sum_exp(self, logits, dim):
"""
Computes log-sum-exp in a stable way
"""
max_val, _ = logits.max(dim)
return max_val + (logits - max_val.unsqueeze(dim)).exp().sum(dim).log()
class OutputLayer(nn.Module):
"""
Abstract base class for output layer.
Handles projection to output labels
"""
def __init__(self, hidden_size, output_size):
super(OutputLayer, self).__init__()
self.output_size = output_size
self.output_projection = nn.Linear(hidden_size, output_size)
def loss(self, hidden, labels):
raise NotImplementedError('Must implement {}.loss'.format(self.
__class__.__name__))
class CRFOutputLayer(OutputLayer):
"""
Implements a CRF based output layer
"""
def __init__(self, hidden_size, output_size):
super(CRFOutputLayer, self).__init__(hidden_size, output_size)
self.crf = CRF(output_size)
def forward(self, hidden):
feats = self.output_projection(hidden)
return self.crf(feats)
def loss(self, hidden, labels):
feats = self.output_projection(hidden)
return self.crf.loss(feats, labels)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4, 'output_size': 4}]
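# --- Hedged usage sketch (added for illustration; not part of the original record) ---
# forward() projects the hidden states and runs Viterbi decoding, returning one tag
# index per position; loss() additionally takes gold tag indices in [0, output_size).
if __name__ == '__main__':
    layer = CRFOutputLayer(hidden_size=4, output_size=4)
    hidden = torch.rand([4, 4, 4])        # [batch, seq_len, hidden_size]
    gold = torch.randint(0, 4, (4, 4))    # [batch, seq_len]
    best_paths = layer(hidden)            # [4, 4] int64 best tag sequence
    nll = layer.loss(hidden, gold)        # scalar negative log likelihood
    print(best_paths.shape, nll.item())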
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_max_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (1 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp10 = tl.load(in_ptr1 + 1)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp13 = tl.load(in_ptr2 + 1)
tmp14 = tl.broadcast_to(tmp13, [XBLOCK])
tmp16 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (2 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr1 + 2)
tmp21 = tl.broadcast_to(tmp20, [XBLOCK])
tmp23 = tl.load(in_ptr2 + 2)
tmp24 = tl.broadcast_to(tmp23, [XBLOCK])
tmp26 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (3 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp30 = tl.load(in_ptr1 + 3)
tmp31 = tl.broadcast_to(tmp30, [XBLOCK])
tmp33 = tl.load(in_ptr2 + 3)
tmp34 = tl.broadcast_to(tmp33, [XBLOCK])
tmp36 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp0 + tmp2
tmp6 = tmp3 + tmp5
tmp8 = tmp6 + tmp7
tmp12 = tmp9 + tmp11
tmp15 = tmp12 + tmp14
tmp17 = tmp15 + tmp16
tmp18 = triton_helpers.maximum(tmp8, tmp17)
tmp22 = tmp19 + tmp21
tmp25 = tmp22 + tmp24
tmp27 = tmp25 + tmp26
tmp28 = triton_helpers.maximum(tmp18, tmp27)
tmp32 = tmp29 + tmp31
tmp35 = tmp32 + tmp34
tmp37 = tmp35 + tmp36
tmp38 = triton_helpers.maximum(tmp28, tmp37)
tmp39 = tmp8 > tmp17
tmp40 = tmp8 == tmp17
tmp41 = tmp8 != tmp8
tmp42 = tmp17 != tmp17
tmp43 = tmp41 > tmp42
tmp44 = tmp39 | tmp43
tmp45 = tmp41 & tmp42
tmp46 = tmp40 | tmp45
tmp47 = tl.full([1], 0, tl.int64)
tmp48 = tl.full([1], 1, tl.int64)
tmp49 = tmp47 < tmp48
tmp50 = tmp46 & tmp49
tmp51 = tmp44 | tmp50
tmp52 = tl.where(tmp51, tmp8, tmp17)
tmp53 = tl.where(tmp51, tmp47, tmp48)
tmp54 = tmp52 > tmp27
tmp55 = tmp52 == tmp27
tmp56 = tmp52 != tmp52
tmp57 = tmp27 != tmp27
tmp58 = tmp56 > tmp57
tmp59 = tmp54 | tmp58
tmp60 = tmp56 & tmp57
tmp61 = tmp55 | tmp60
tmp62 = tl.full([1], 2, tl.int64)
tmp63 = tmp53 < tmp62
tmp64 = tmp61 & tmp63
tmp65 = tmp59 | tmp64
tmp66 = tl.where(tmp65, tmp52, tmp27)
tmp67 = tl.where(tmp65, tmp53, tmp62)
tmp68 = tmp66 > tmp37
tmp69 = tmp66 == tmp37
tmp70 = tmp66 != tmp66
tmp71 = tmp37 != tmp37
tmp72 = tmp70 > tmp71
tmp73 = tmp68 | tmp72
tmp74 = tmp70 & tmp71
tmp75 = tmp69 | tmp74
tmp76 = tl.full([1], 3, tl.int64)
tmp77 = tmp67 < tmp76
tmp78 = tmp75 & tmp77
tmp79 = tmp73 | tmp78
tl.where(tmp79, tmp66, tmp37)
tmp81 = tl.where(tmp79, tmp67, tmp76)
tl.store(out_ptr0 + x2, tmp38, xmask)
tl.store(out_ptr1 + x2, tmp81, xmask)
@triton.jit
def triton_poi_fused_add_max_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (5 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp10 = tl.load(in_ptr2 + 1)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp14 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr1 + (6 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr2 + 2)
tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
tmp23 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp27 = tl.load(in_ptr1 + (7 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr2 + 3)
tmp29 = tl.broadcast_to(tmp28, [XBLOCK])
tmp32 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp7 = tmp5 + tmp6
tmp12 = tmp9 + tmp11
tmp13 = tmp8 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = triton_helpers.maximum(tmp7, tmp15)
tmp21 = tmp18 + tmp20
tmp22 = tmp17 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = triton_helpers.maximum(tmp16, tmp24)
tmp30 = tmp27 + tmp29
tmp31 = tmp26 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = triton_helpers.maximum(tmp25, tmp33)
tmp35 = tmp7 > tmp15
tmp36 = tmp7 == tmp15
tmp37 = tmp7 != tmp7
tmp38 = tmp15 != tmp15
tmp39 = tmp37 > tmp38
tmp40 = tmp35 | tmp39
tmp41 = tmp37 & tmp38
tmp42 = tmp36 | tmp41
tmp43 = tl.full([1], 0, tl.int64)
tmp44 = tl.full([1], 1, tl.int64)
tmp45 = tmp43 < tmp44
tmp46 = tmp42 & tmp45
tmp47 = tmp40 | tmp46
tmp48 = tl.where(tmp47, tmp7, tmp15)
tmp49 = tl.where(tmp47, tmp43, tmp44)
tmp50 = tmp48 > tmp24
tmp51 = tmp48 == tmp24
tmp52 = tmp48 != tmp48
tmp53 = tmp24 != tmp24
tmp54 = tmp52 > tmp53
tmp55 = tmp50 | tmp54
tmp56 = tmp52 & tmp53
tmp57 = tmp51 | tmp56
tmp58 = tl.full([1], 2, tl.int64)
tmp59 = tmp49 < tmp58
tmp60 = tmp57 & tmp59
tmp61 = tmp55 | tmp60
tmp62 = tl.where(tmp61, tmp48, tmp24)
tmp63 = tl.where(tmp61, tmp49, tmp58)
tmp64 = tmp62 > tmp33
tmp65 = tmp62 == tmp33
tmp66 = tmp62 != tmp62
tmp67 = tmp33 != tmp33
tmp68 = tmp66 > tmp67
tmp69 = tmp64 | tmp68
tmp70 = tmp66 & tmp67
tmp71 = tmp65 | tmp70
tmp72 = tl.full([1], 3, tl.int64)
tmp73 = tmp63 < tmp72
tmp74 = tmp71 & tmp73
tmp75 = tmp69 | tmp74
tl.where(tmp75, tmp62, tmp33)
tmp77 = tl.where(tmp75, tmp63, tmp72)
tl.store(out_ptr0 + x2, tmp34, xmask)
tl.store(out_ptr1 + x2, tmp77, xmask)
@triton.jit
def triton_poi_fused_add_max_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (8 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (9 + 16 * x1), xmask, eviction_policy='evict_last'
)
tmp10 = tl.load(in_ptr2 + 1)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp14 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr1 + (10 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr2 + 2)
tmp20 = tl.broadcast_to(tmp19, [XBLOCK])
tmp23 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp27 = tl.load(in_ptr1 + (11 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr2 + 3)
tmp29 = tl.broadcast_to(tmp28, [XBLOCK])
tmp32 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp7 = tmp5 + tmp6
tmp12 = tmp9 + tmp11
tmp13 = tmp8 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = triton_helpers.maximum(tmp7, tmp15)
tmp21 = tmp18 + tmp20
tmp22 = tmp17 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = triton_helpers.maximum(tmp16, tmp24)
tmp30 = tmp27 + tmp29
tmp31 = tmp26 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = triton_helpers.maximum(tmp25, tmp33)
tmp35 = tmp7 > tmp15
tmp36 = tmp7 == tmp15
tmp37 = tmp7 != tmp7
tmp38 = tmp15 != tmp15
tmp39 = tmp37 > tmp38
tmp40 = tmp35 | tmp39
tmp41 = tmp37 & tmp38
tmp42 = tmp36 | tmp41
tmp43 = tl.full([1], 0, tl.int64)
tmp44 = tl.full([1], 1, tl.int64)
tmp45 = tmp43 < tmp44
tmp46 = tmp42 & tmp45
tmp47 = tmp40 | tmp46
tmp48 = tl.where(tmp47, tmp7, tmp15)
tmp49 = tl.where(tmp47, tmp43, tmp44)
tmp50 = tmp48 > tmp24
tmp51 = tmp48 == tmp24
tmp52 = tmp48 != tmp48
tmp53 = tmp24 != tmp24
tmp54 = tmp52 > tmp53
tmp55 = tmp50 | tmp54
tmp56 = tmp52 & tmp53
tmp57 = tmp51 | tmp56
tmp58 = tl.full([1], 2, tl.int64)
tmp59 = tmp49 < tmp58
tmp60 = tmp57 & tmp59
tmp61 = tmp55 | tmp60
tmp62 = tl.where(tmp61, tmp48, tmp24)
tmp63 = tl.where(tmp61, tmp49, tmp58)
tmp64 = tmp62 > tmp33
tmp65 = tmp62 == tmp33
tmp66 = tmp62 != tmp62
tmp67 = tmp33 != tmp33
tmp68 = tmp66 > tmp67
tmp69 = tmp64 | tmp68
tmp70 = tmp66 & tmp67
tmp71 = tmp65 | tmp70
tmp72 = tl.full([1], 3, tl.int64)
tmp73 = tmp63 < tmp72
tmp74 = tmp71 & tmp73
tmp75 = tmp69 | tmp74
tl.where(tmp75, tmp62, tmp33)
tmp77 = tl.where(tmp75, tmp63, tmp72)
tl.store(out_ptr0 + x2, tmp34, xmask)
tl.store(out_ptr1 + x2, tmp77, xmask)
@triton.jit
def triton_poi_fused_add_gather_max_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, in_ptr5, in_ptr6, out_ptr0, out_ptr1, out_ptr2, out_ptr3,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr3 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp9 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr2 + 1)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp15 = tl.load(in_ptr3 + 1)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp33 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp34 = tl.load(in_ptr1 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp35 = tl.load(in_ptr2 + 2)
tmp36 = tl.broadcast_to(tmp35, [XBLOCK])
tmp39 = tl.load(in_ptr3 + 2)
tmp40 = tl.broadcast_to(tmp39, [XBLOCK])
tmp56 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp57 = tl.load(in_ptr1 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp58 = tl.load(in_ptr2 + 3)
tmp59 = tl.broadcast_to(tmp58, [XBLOCK])
tmp62 = tl.load(in_ptr3 + 3)
tmp63 = tl.broadcast_to(tmp62, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp8 = tmp5 + tmp7
tmp13 = tmp10 + tmp12
tmp14 = tmp9 + tmp13
tmp17 = tmp14 + tmp16
tmp18 = tmp8 > tmp17
tmp19 = tmp8 == tmp17
tmp20 = tmp8 != tmp8
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1], 0, tl.int64)
tmp27 = tl.full([1], 1, tl.int64)
tmp28 = tmp26 < tmp27
tmp29 = tmp25 & tmp28
tmp30 = tmp23 | tmp29
tmp31 = tl.where(tmp30, tmp8, tmp17)
tmp32 = tl.where(tmp30, tmp26, tmp27)
tmp37 = tmp34 + tmp36
tmp38 = tmp33 + tmp37
tmp41 = tmp38 + tmp40
tmp42 = tmp31 > tmp41
tmp43 = tmp31 == tmp41
tmp44 = tmp31 != tmp31
tmp45 = tmp41 != tmp41
tmp46 = tmp44 > tmp45
tmp47 = tmp42 | tmp46
tmp48 = tmp44 & tmp45
tmp49 = tmp43 | tmp48
tmp50 = tl.full([1], 2, tl.int64)
tmp51 = tmp32 < tmp50
tmp52 = tmp49 & tmp51
tmp53 = tmp47 | tmp52
tmp54 = tl.where(tmp53, tmp31, tmp41)
tmp55 = tl.where(tmp53, tmp32, tmp50)
tmp60 = tmp57 + tmp59
tmp61 = tmp56 + tmp60
tmp64 = tmp61 + tmp63
tmp65 = tmp54 > tmp64
tmp66 = tmp54 == tmp64
tmp67 = tmp54 != tmp54
tmp68 = tmp64 != tmp64
tmp69 = tmp67 > tmp68
tmp70 = tmp65 | tmp69
tmp71 = tmp67 & tmp68
tmp72 = tmp66 | tmp71
tmp73 = tl.full([1], 3, tl.int64)
tmp74 = tmp55 < tmp73
tmp75 = tmp72 & tmp74
tmp76 = tmp70 | tmp75
tl.where(tmp76, tmp54, tmp64)
tmp78 = tl.where(tmp76, tmp55, tmp73)
tmp79 = tl.full([XBLOCK], 4, tl.int32)
tmp80 = tmp78 + tmp79
tmp81 = tmp78 < 0
tmp82 = tl.where(tmp81, tmp80, tmp78)
tl.device_assert((0 <= tmp82) & (tmp82 < 4) | ~xmask,
'index out of bounds: 0 <= tmp82 < 4')
tmp84 = tl.load(in_ptr4 + (tmp82 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp85 = tmp84 + tmp79
tmp86 = tmp84 < 0
tmp87 = tl.where(tmp86, tmp85, tmp84)
tl.device_assert((0 <= tmp87) & (tmp87 < 4) | ~xmask,
'index out of bounds: 0 <= tmp87 < 4')
tmp89 = tl.load(in_ptr5 + (tmp87 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp90 = tmp89 + tmp79
tmp91 = tmp89 < 0
tmp92 = tl.where(tmp91, tmp90, tmp89)
tl.device_assert((0 <= tmp92) & (tmp92 < 4) | ~xmask,
'index out of bounds: 0 <= tmp92 < 4')
tmp94 = tl.load(in_ptr6 + (tmp92 + 4 * x0), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + 4 * x0, tmp78, xmask)
tl.store(out_ptr1 + 4 * x0, tmp94, xmask)
tl.store(out_ptr2 + 4 * x0, tmp89, xmask)
tl.store(out_ptr3 + 4 * x0, tmp84, xmask)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4,), (1,))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg3_1, (4,), (1,))
assert_size_stride(arg4_1, (4, 4), (4, 1))
assert_size_stride(arg5_1, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(arg2_1, (16, 4), (4, 1), 0),
reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0), out=buf0)
del arg0_1
del arg2_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
get_raw_stream(0)
triton_poi_fused_add_max_0[grid(16)](buf0, arg1_1, arg3_1, arg4_1,
buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg3_1
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
triton_poi_fused_add_max_1[grid(16)](buf1, buf0, arg1_1, arg4_1,
buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf5 = buf1
del buf1
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
triton_poi_fused_add_max_2[grid(16)](buf3, buf0, arg1_1, arg4_1,
buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg4_1
del buf3
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
buf7 = reinterpret_tensor(buf11, (4, 1), (4, 1), 3)
buf8 = reinterpret_tensor(buf11, (4, 1), (4, 1), 0)
buf9 = reinterpret_tensor(buf11, (4, 1), (4, 1), 1)
buf10 = reinterpret_tensor(buf11, (4, 1), (4, 1), 2)
triton_poi_fused_add_gather_max_3[grid(4)](buf5, buf0, arg1_1,
arg5_1, buf6, buf4, buf2, buf7, buf8, buf9, buf10, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del arg1_1
del arg5_1
del buf0
del buf2
del buf4
del buf5
del buf6
return buf11,
class CRF(nn.Module):
"""
Implements Conditional Random Fields that can be trained via
backpropagation.
"""
def __init__(self, num_tags):
super(CRF, self).__init__()
self.num_tags = num_tags
self.transitions = nn.Parameter(torch.Tensor(num_tags, num_tags))
self.start_transitions = nn.Parameter(torch.randn(num_tags))
self.stop_transitions = nn.Parameter(torch.randn(num_tags))
nn.init.xavier_normal_(self.transitions)
def forward(self, feats):
if len(feats.shape) != 3:
            raise ValueError('feats must be 3-d but got {}-d'.format(len(feats.shape)))
return self._viterbi(feats)
def loss(self, feats, tags):
"""
Computes negative log likelihood between features and tags.
        Essentially the difference between individual sequence scores and the
        sum of all possible sequence scores (the partition function).
Parameters:
feats: Input features [batch size, sequence length, number of tags]
tags: Target tag indices [batch size, sequence length]. Should be between
0 and num_tags
Returns:
Negative log likelihood [a scalar]
"""
        if len(feats.shape) != 3:
            raise ValueError('feats must be 3-d but got {}-d'.format(len(feats.shape)))
        if len(tags.shape) != 2:
            raise ValueError('tags must be 2-d but got {}-d'.format(len(tags.shape)))
if feats.shape[:2] != tags.shape:
raise ValueError(
'First two dimensions of feats and tags must match')
sequence_score = self._sequence_score(feats, tags)
partition_function = self._partition_function(feats)
log_probability = sequence_score - partition_function
return -log_probability.mean()
def _sequence_score(self, feats, tags):
"""
Parameters:
feats: Input features [batch size, sequence length, number of tags]
tags: Target tag indices [batch size, sequence length]. Should be between
0 and num_tags
Returns: Sequence score of shape [batch size]
"""
        feats.shape[0]  # presumably intended as the batch size; unused, kept as a no-op from the original
feat_score = feats.gather(2, tags.unsqueeze(-1)).squeeze(-1).sum(dim=-1
)
tags_pairs = tags.unfold(1, 2, 1)
indices = tags_pairs.permute(2, 0, 1).chunk(2)
trans_score = self.transitions[indices].squeeze(0).sum(dim=-1)
start_score = self.start_transitions[tags[:, 0]]
stop_score = self.stop_transitions[tags[:, -1]]
return feat_score + start_score + trans_score + stop_score
def _partition_function(self, feats):
"""
        Computes the partition function for the CRF using the forward algorithm:
        the (log-space) sum of scores over all possible tag sequences for
        the given feature vector sequence.
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns:
Total scores of shape [batch size]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
raise ValueError('num_tags should be {} but got {}'.format(self
.num_tags, num_tags))
a = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
for i in range(1, seq_size):
feat = feats[:, i].unsqueeze(1)
a = self._log_sum_exp(a.unsqueeze(-1) + transitions + feat, 1)
return self._log_sum_exp(a + self.stop_transitions.unsqueeze(0), 1)
def _viterbi(self, feats):
"""
Uses Viterbi algorithm to predict the best sequence
Parameters:
feats: Input features [batch size, sequence length, number of tags]
Returns: Best tag sequence [batch size, sequence length]
"""
_, seq_size, num_tags = feats.shape
if self.num_tags != num_tags:
raise ValueError('num_tags should be {} but got {}'.format(self
.num_tags, num_tags))
v = feats[:, 0] + self.start_transitions.unsqueeze(0)
transitions = self.transitions.unsqueeze(0)
paths = []
for i in range(1, seq_size):
feat = feats[:, i]
v, idx = (v.unsqueeze(-1) + transitions).max(1)
paths.append(idx)
v = v + feat
v, tag = (v + self.stop_transitions.unsqueeze(0)).max(1, True)
tags = [tag]
for idx in reversed(paths):
tag = idx.gather(1, tag)
tags.append(tag)
tags.reverse()
return torch.cat(tags, 1)
def _log_sum_exp(self, logits, dim):
"""
Computes log-sum-exp in a stable way
"""
max_val, _ = logits.max(dim)
return max_val + (logits - max_val.unsqueeze(dim)).exp().sum(dim).log()
class OutputLayer(nn.Module):
"""
Abstract base class for output layer.
Handles projection to output labels
"""
def __init__(self, hidden_size, output_size):
super(OutputLayer, self).__init__()
self.output_size = output_size
self.output_projection = nn.Linear(hidden_size, output_size)
def loss(self, hidden, labels):
raise NotImplementedError('Must implement {}.loss'.format(self.
__class__.__name__))
class CRFOutputLayerNew(OutputLayer):
"""
Implements a CRF based output layer
"""
def __init__(self, hidden_size, output_size):
super(CRFOutputLayerNew, self).__init__(hidden_size, output_size)
self.crf = CRF(output_size)
def loss(self, hidden, labels):
feats = self.output_projection(hidden)
return self.crf.loss(feats, labels)
def forward(self, input_0):
arg0_1 = self.output_projection.weight
arg1_1 = self.output_projection.bias
arg4_1 = self.crf.transitions
arg3_1 = self.crf.start_transitions
arg5_1 = self.crf.stop_transitions
arg2_1 = input_0
output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1])
return output[0]
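# --- Hedged sketch (assumes a CUDA device; added for illustration) ---
# The compiled forward runs the projection as one mm and the Viterbi recursion as the
# fused add/max kernels above; the generated code is specialised to the [4, 4, 4]
# shapes asserted in call() and returns the best tag indices as int64.
if __name__ == '__main__' and torch.cuda.is_available():
    layer = CRFOutputLayerNew(hidden_size=4, output_size=4).cuda()
    hidden = torch.rand([4, 4, 4], device='cuda')
    best_paths = layer(hidden)
    print(best_paths.shape, best_paths.dtype)  # torch.Size([4, 4]) torch.int64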
|
oya163/torchnlp
|
CRFOutputLayer
| false
| 4,118
|
[
"Apache-2.0"
] | 0
|
361caa24d741e47b8bd92af122ae281d6ad72d9d
|
https://github.com/oya163/torchnlp/tree/361caa24d741e47b8bd92af122ae281d6ad72d9d
|
baseRNN_predict
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/nq/cnqjufcqn3ur3s7xvlb2i747nyf24md4zaiatlwgkasynplfjstu.py
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# h => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/dh/cdhj4aozvvzkw7stzrqoauyoij3petwtvi4g4weydesiaurrughd.py
# Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# h_1 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (64, 4), (4, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (128, 64), (64, 1))
assert_size_stride(primals_5, (128, ), (1, ))
assert_size_stride(primals_6, (4, 128), (128, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf0 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf6, 4096, grid=grid(4096), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 128), (1, 64), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf2 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool)
# Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf5, 8192, grid=grid(8192), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [obs], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf4)
del primals_7
return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(buf3, (64, 128), (128, 1), 0), primals_6, buf5, primals_4, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((128, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
import torch.nn as nn
import torch.nn.init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (64, 4), (4, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (128, 64), (64, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (4, 128), (128, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool
)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf1,
primals_2, buf6, 4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0),
reinterpret_tensor(primals_4, (64, 128), (1, 64), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(8192)](buf3,
primals_5, buf5, 8192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_6, (128, 4), (1, 128),
0), alpha=1, beta=1, out=buf4)
del primals_7
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(
buf3, (64, 128), (128, 1), 0), primals_6, buf5, primals_4, buf6
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
weight_shape = list(m.weight.data.size())
fan_in = np.prod(weight_shape[1:4])
fan_out = np.prod(weight_shape[2:4]) * weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
weight_shape = list(m.weight.data.size())
fan_in = weight_shape[1]
fan_out = weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find('GRUCell') != -1:
for param in m.parameters():
if len(param.shape) >= 2:
init.orthogonal_(param.data)
else:
init.normal_(param.data)
class baseRNN_predictNew(nn.Module):
def __init__(self, h_size, obs_dim, num_actions, context_input=False):
super(baseRNN_predictNew, self).__init__()
self.l1 = nn.Linear(h_size, 64)
self.l2 = nn.Linear(64, 128)
self.l3 = nn.Linear(128, obs_dim)
self.apply(weights_init)
def forward(self, input_0):
primals_1 = self.l1.weight
primals_2 = self.l1.bias
primals_4 = self.l2.weight
primals_5 = self.l2.bias
primals_6 = self.l3.weight
primals_7 = self.l3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
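# --- Hedged usage sketch (assumes a CUDA device; added for illustration) ---
# The compiled predictor maps h_size -> 64 -> 128 -> obs_dim with ReLU activations; the
# generated call() asserts h_size == 4 and obs_dim == 4, and num_actions/context_input
# are accepted for interface compatibility but unused here.
if __name__ == '__main__' and torch.cuda.is_available():
    net = baseRNN_predictNew(h_size=4, obs_dim=4, num_actions=2).cuda()
    h = torch.rand([4, 4, 4, 4], device='cuda')
    obs = net(h)
    print(obs.shape)  # torch.Size([4, 4, 4, 4])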
|
lysuk96/rl_representations
|
baseRNN_predict
| false
| 15,981
|
[
"MIT"
] | 438
|
19de69305e40c9b3a1d746a7af26d232c9fb3f6f
|
https://github.com/lysuk96/rl_representations/tree/19de69305e40c9b3a1d746a7af26d232c9fb3f6f
|
RegressionModel
|
import torch
import torch.nn as nn
class RegressionModel(nn.Module):
def __init__(self, num_features_in, num_anchors=9, feature_size=256):
super(RegressionModel, self).__init__()
self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3,
padding=1)
self.act1 = nn.ReLU()
self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act2 = nn.ReLU()
self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act3 = nn.ReLU()
self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act4 = nn.ReLU()
self.output = nn.Conv2d(feature_size, num_anchors * 4, kernel_size=
3, padding=1)
def forward(self, x):
out = self.conv1(x)
out = self.act1(out)
out = self.conv2(out)
out = self.act2(out)
out = self.conv3(out)
out = self.act3(out)
out = self.conv4(out)
out = self.act4(out)
out = self.output(out)
out = out.permute(0, 2, 3, 1)
return out.contiguous().view(out.shape[0], -1, 4)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_features_in': 4}]
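# --- Hedged usage sketch (added for illustration; not part of the original record) ---
# With a 4x4 feature map and the default 9 anchors the head emits 36 channels, and the
# final permute/view reshapes them to [batch, H * W * num_anchors, 4] = [4, 144, 4].
if __name__ == '__main__':
    model = RegressionModel(num_features_in=4)
    x, = get_inputs()
    out = model(x)
    print(out.shape)  # torch.Size([4, 144, 4])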
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_clone_view_1(in_out_ptr0, in_ptr0, in_ptr1, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 576 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.debug_barrier()
tl.store(in_out_ptr0 + (x2 + 36 * y3), tmp2, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (256, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (36, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_11, (36,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 256, 4, 4), (4096, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(16384)](buf1, primals_2,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 256, 4, 4), (4096, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_0[grid(16384)](buf3, primals_5,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 256, 4, 4), (4096, 16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_0[grid(16384)](buf5, primals_7,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 4, 4), (4096, 16, 4, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_0[grid(16384)](buf7, primals_9,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 36, 4, 4), (576, 16, 4, 1))
buf9 = empty_strided_cuda((4, 4, 4, 36), (576, 144, 36, 1), torch.
float32)
buf10 = reinterpret_tensor(buf9, (4, 144, 4), (576, 4, 1), 0)
del buf9
triton_poi_fused_clone_view_1[grid(64, 36)](buf10, buf8, primals_11,
64, 36, XBLOCK=64, YBLOCK=4, num_warps=4, num_stages=1)
del buf8
del primals_11
return (buf10, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, buf1, buf3, buf5, buf7)
class RegressionModelNew(nn.Module):
def __init__(self, num_features_in, num_anchors=9, feature_size=256):
super(RegressionModelNew, self).__init__()
self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3,
padding=1)
self.act1 = nn.ReLU()
self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act2 = nn.ReLU()
self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act3 = nn.ReLU()
self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act4 = nn.ReLU()
self.output = nn.Conv2d(feature_size, num_anchors * 4, kernel_size=
3, padding=1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.output.weight
primals_11 = self.output.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
CraigWang1/EfficientDet-PyTorch
|
RegressionModel
| false
| 13,539
|
[
"Apache-2.0"
] | 66
|
531d3c83338f03aa5c6f0615839c0ea5c03025f6
|
https://github.com/CraigWang1/EfficientDet-PyTorch/tree/531d3c83338f03aa5c6f0615839c0ea5c03025f6
|
PosNACLayer
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/gy/cgyeivaj5xohig6bokzkqeo2uatkrsikluo6qxgc3gxiv2okwbcm.py
# Topologically Sorted Source Nodes: [W], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# W => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_1,), kwargs = {})
triton_poi_fused_sigmoid_0 = async_compile.triton('triton_poi_fused_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [W], Original ATen: [aten.sigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_sigmoid_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), out=buf1)
del buf0
return (reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_1, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import collections
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_sigmoid_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(buf0, (4, 4), (1, 4), 0), out=buf1)
del buf0
return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_1, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0)
def sparsity_error(W):
W_error = torch.min(torch.abs(W), torch.abs(1 - torch.abs(W)))
return torch.max(W_error)
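# Editor's note: a minimal hedged example added for illustration; it is not part of
# the original repository code. sparsity_error reports how far the entries of W are
# from the nearest value in {-1, 0, 1} (the "sparse" values a NAC-style weight should reach).
def _sparsity_error_example():
    W = torch.tensor([[0.0, 1.0], [-1.0, 0.9]])
    # min(|0.9|, |1 - 0.9|) = 0.1, so the maximum element-wise error is 0.1
    return sparsity_error(W)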
class SummaryWriterNamespaceNoLoggingScope:
def __init__(self, writer):
self._writer = writer
def __enter__(self):
self._writer._logging_enabled = False
def __exit__(self, type, value, traceback):
self._writer._logging_enabled = True
return False
class DummySummaryWriter:
def __init__(self, **kwargs):
self._logging_enabled = False
def add_scalar(self, name, value, verbose_only=True):
pass
def add_summary(self, name, tensor, verbose_only=True):
pass
def add_histogram(self, name, tensor, verbose_only=True):
pass
def add_tensor(self, name, tensor, verbose_only=True):
pass
def print(self, name, tensor, verbose_only=True):
pass
def namespace(self, name):
return self
def every(self, epoch_interval):
return self
def verbose(self, verbose):
return self
def no_logging(self):
return SummaryWriterNamespaceNoLoggingScope(self)
class NoRandomScope:
def __init__(self, module):
self._module = module
def __enter__(self):
self._module._disable_random()
def __exit__(self, type, value, traceback):
self._module._enable_random()
return False
class ExtendedTorchModule(torch.nn.Module):
def __init__(self, default_name, *args, writer=None, name=None, **kwargs):
super().__init__()
if writer is None:
writer = DummySummaryWriter()
self.writer = writer.namespace(default_name if name is None else name)
self.allow_random = True
def set_parameter(self, name, value):
parameter = getattr(self, name, None)
if isinstance(parameter, torch.nn.Parameter):
parameter.fill_(value)
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.set_parameter(name, value)
def regualizer(self, merge_in=None):
regualizers = collections.defaultdict(int)
if merge_in is not None:
for key, value in merge_in.items():
self.writer.add_scalar(f'regualizer/{key}', value)
regualizers[key] += value
for module in self.children():
if isinstance(module, ExtendedTorchModule):
for key, value in module.regualizer().items():
regualizers[key] += value
return regualizers
def optimize(self, loss):
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.optimize(loss)
def log_gradients(self):
for name, parameter in self.named_parameters(recurse=False):
if parameter.requires_grad:
gradient, *_ = parameter.grad.data
self.writer.add_summary(f'{name}/grad', gradient)
self.writer.add_histogram(f'{name}/grad', gradient)
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module.log_gradients()
def no_internal_logging(self):
return self.writer.no_logging()
def _disable_random(self):
self.allow_random = False
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module._disable_random()
def _enable_random(self):
self.allow_random = True
for module in self.children():
if isinstance(module, ExtendedTorchModule):
module._enable_random()
def no_random(self):
return NoRandomScope(self)
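# Editor's note: a hedged usage sketch added for illustration; it is not part of the
# original repository code, and `model` / `loss_regualizers` are hypothetical names.
# It shows how ExtendedTorchModule composes: regualizer() sums the per-key dicts of
# every ExtendedTorchModule child, while no_random() and no_internal_logging() return
# context managers that toggle the corresponding flags for the whole subtree.
def _extended_module_sketch(model, loss_regualizers):
    totals = model.regualizer(merge_in=loss_regualizers)  # e.g. {'W': 0.1} gets logged and summed
    with model.no_random(), model.no_internal_logging():
        pass  # randomness and internal logging are disabled inside this block
    return totals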
class PosNACLayerNew(ExtendedTorchModule):
"""Implements the NAC (Neural Accumulator)
Arguments:
in_features: number of ingoing features
out_features: number of outgoing features
"""
def __init__(self, in_features, out_features, **kwargs):
super().__init__('nac', **kwargs)
self.in_features = in_features
self.out_features = out_features
self.W_hat = torch.nn.Parameter(torch.Tensor(out_features, in_features)
)
self.register_parameter('bias', None)
def reset_parameters(self):
torch.nn.init.xavier_normal_(self.W_hat)
def extra_repr(self):
return 'in_features={}, out_features={}'.format(self.in_features,
self.out_features)
def forward(self, input_0):
primals_1 = self.W_hat
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
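# Editor's note: a hedged usage sketch added for illustration; it is not part of the
# original repository code. PosNACLayerNew constrains its effective weights to (0, 1)
# by passing W_hat through a sigmoid (the fused Triton kernel above) before the
# matmul, i.e. output = input @ sigmoid(W_hat).T with no bias. A CUDA device is
# assumed because the compiled call() launches Triton kernels.
def _pos_nac_example():
    layer = PosNACLayerNew(in_features=4, out_features=4).cuda()
    layer.reset_parameters()  # W_hat starts uninitialised; xavier-init it before use
    x = torch.rand(4, 4, 4, 4, device='cuda')  # shape fixed by the guards in call()
    return layer(x)  # (4, 4, 4, 4) tensor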
|
wlm2019/Neural-Arithmetic-Units
|
PosNACLayer
| false
| 16,718
|
[
"MIT"
] | 147
|
f9de9d004bb2dc2ee28577cd1760d0a00c185836
|
https://github.com/wlm2019/Neural-Arithmetic-Units/tree/f9de9d004bb2dc2ee28577cd1760d0a00c185836
|
Net1
|
import torch
def square(x):
return x * x
class Net1(torch.nn.Module):
def __init__(self, hidden=64, output=10):
super().__init__()
self.conv1 = torch.nn.Conv2d(1, 4, kernel_size=7, padding=0, stride=3)
self.fc1 = torch.nn.Linear(256, hidden)
self.fc2 = torch.nn.Linear(hidden, output)
def forward(self, x):
x = self.conv1(x)
x = square(x)
x = x.view(-1, 256)
x = self.fc1(x)
x = square(x)
x = self.fc2(x)
return x
def mid_layer(self):
return ['o1', 'o1a', 'o2', 'o2a', 'o3']
def forward_analyze(self, x):
o1 = self.conv1(x)
o1a = square(o1)
        o1a = o1a.view(-1, 256)  # reshape the squared conv output; the original `x.view` reshaped the raw input by mistake
o2 = self.fc1(o1a)
o2a = square(o2)
o3 = self.fc2(o2a)
return o1, o1a, o2, o2a, o3
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
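# Editor's note: a hedged usage sketch added for illustration; it is not part of the
# original module. Net1 uses square(x) = x * x as its activation, a polynomial
# non-linearity often chosen when a network must be evaluated under homomorphic
# encryption. The stride-3, 7x7 conv maps the 1x64x64 input to 4x20x20 = 1600 values
# per image, which view(-1, 256) regroups into 25 rows of 256 before the linear layers.
def _net1_example():
    model = Net1(hidden=64, output=10)
    x, = get_inputs()   # the [4, 1, 64, 64] random batch defined above
    return model(x)     # shape [25, 10] because of the view(-1, 256) regrouping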
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_mul_0(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 400 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tmp2 * tmp2
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp3, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 * tmp0
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 1, 7, 7), (49, 49, 7, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (64, 256), (256, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (10, 64), (64, 1))
assert_size_stride(primals_7, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(3,
3), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 20, 20), (1600, 400, 20, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 20, 20), (1600, 400, 20, 1), torch
.float32)
get_raw_stream(0)
triton_poi_fused_convolution_mul_0[grid(6400)](buf1, primals_2,
buf2, 6400, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf3 = empty_strided_cuda((25, 64), (64, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (25, 256),
(256, 1), 0), reinterpret_tensor(primals_4, (256, 64), (1, 256),
0), alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((25, 64), (64, 1), torch.float32)
triton_poi_fused_mul_1[grid(1600)](buf3, buf4, 1600, XBLOCK=128,
num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((25, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_7, buf4, reinterpret_tensor(primals_6,
(64, 10), (1, 64), 0), alpha=1, beta=1, out=buf5)
del primals_7
return buf5, primals_1, primals_3, buf1, reinterpret_tensor(buf2, (25,
256), (256, 1), 0), buf3, buf4, primals_6, primals_4
def square(x):
return x * x
class Net1New(torch.nn.Module):
def __init__(self, hidden=64, output=10):
super().__init__()
self.conv1 = torch.nn.Conv2d(1, 4, kernel_size=7, padding=0, stride=3)
self.fc1 = torch.nn.Linear(256, hidden)
self.fc2 = torch.nn.Linear(hidden, output)
def mid_layer(self):
return ['o1', 'o1a', 'o2', 'o2a', 'o3']
def forward_analyze(self, x):
o1 = self.conv1(x)
o1a = square(o1)
        o1a = o1a.view(-1, 256)  # reshape the squared conv output; the original `x.view` reshaped the raw input by mistake
o2 = self.fc1(o1a)
o2a = square(o2)
o3 = self.fc2(o2a)
return o1, o1a, o2, o2a, o3
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.fc1.weight
primals_5 = self.fc1.bias
primals_6 = self.fc2.weight
primals_7 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
yxtj/henn
|
Net1
| false
| 11,058
|
[
"MIT"
] | 0
|
5093f3e637ba0bb3e48c4f890b3b469c3617f2c5
|
https://github.com/yxtj/henn/tree/5093f3e637ba0bb3e48c4f890b3b469c3617f2c5
|
Conv2d_spatial_sep
|
import torch
import torch.nn as nn
class Conv2d_spatial_sep(nn.Module):
def __init__(self, nin, nout):
super(Conv2d_spatial_sep, self).__init__()
self.conv1 = nn.Conv2d(nin, 1, kernel_size=(1, 3), groups=1, padding=0)
self.conv2 = nn.Conv2d(1, nout, kernel_size=(3, 1), groups=1, padding=1
)
def forward(self, x):
return self.conv2(self.conv1(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'nin': 4, 'nout': 4}]
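# Editor's note: a hedged illustration added by the editor; it is not part of the
# original module. The "spatial separation" factorises a 3x3 convolution into a (1x3)
# conv followed by a (3x1) conv through a single intermediate channel, which needs
# nin*3 + 3*nout weights instead of nin*nout*9.
def _spatial_sep_example():
    m = Conv2d_spatial_sep(nin=4, nout=4)
    x, = get_inputs()  # [4, 4, 4, 4]
    # conv1 (1x3, no padding) gives [4, 1, 4, 2]; conv2 (3x1, padding=1 on both dims)
    # pads the width back out, so the final output is again [4, 4, 4, 4].
    return m(x)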
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 1, 3), (12, 3, 3, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 1, 3, 1), (3, 3, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4, 2), (8, 8, 2, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(32)](buf1, primals_2, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(256)](buf3, primals_5, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
return buf3, primals_1, primals_3, primals_4, buf1
class Conv2d_spatial_sepNew(nn.Module):
def __init__(self, nin, nout):
super(Conv2d_spatial_sepNew, self).__init__()
self.conv1 = nn.Conv2d(nin, 1, kernel_size=(1, 3), groups=1, padding=0)
self.conv2 = nn.Conv2d(1, nout, kernel_size=(3, 1), groups=1, padding=1
)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
maet3608/torchy
|
Conv2d_spatial_sep
| false
| 3,969
|
[
"Apache-2.0"
] | 0
|
8c73732a1d4631bd97bfafdc18e52a22ff5410f7
|
https://github.com/maet3608/torchy/tree/8c73732a1d4631bd97bfafdc18e52a22ff5410f7
|
FullyConnected2
|
import torch
import torch.nn as nn
class FullyConnected2(nn.Module):
def __init__(self, hidden_size, output_size):
super(FullyConnected2, self).__init__()
self.lrelu = nn.LeakyReLU(0.1)
self.linear_layer = nn.Linear(hidden_size, hidden_size, bias=True)
self.linear_layer_1 = nn.Linear(hidden_size, output_size, bias=False)
def forward(self, input):
out = self.lrelu(self.linear_layer(input))
return self.linear_layer_1(out)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4, 'output_size': 4}]
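# Editor's note: a hedged usage sketch added for illustration; it is not part of the
# original module. FullyConnected2 is a two-layer MLP head: Linear(hidden, hidden)
# with bias followed by LeakyReLU(0.1), then a bias-free Linear(hidden, output). In
# the compiled code below, the bias add and the leaky-relu are fused into one kernel
# that also stores the (x > 0) mask used by the backward pass.
def _fully_connected2_example():
    head = FullyConnected2(hidden_size=4, output_size=4)
    x, = get_inputs()  # [4, 4, 4, 4]; nn.Linear acts on the last dimension
    return head(x)     # [4, 4, 4, 4]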
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(256)](buf0, primals_2, buf1,
buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf3 = buf0
del buf0
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4
class FullyConnected2New(nn.Module):
def __init__(self, hidden_size, output_size):
super(FullyConnected2New, self).__init__()
self.lrelu = nn.LeakyReLU(0.1)
self.linear_layer = nn.Linear(hidden_size, hidden_size, bias=True)
self.linear_layer_1 = nn.Linear(hidden_size, output_size, bias=False)
def forward(self, input_0):
primals_1 = self.linear_layer.weight
primals_2 = self.linear_layer.bias
primals_4 = self.linear_layer_1.weight
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
Felix2048/SSM-VLN
|
FullyConnected2
| false
| 8,111
|
[
"MIT"
] | 27
|
25b9f98566d6e29d30e09aa8f96257f5935642d6
|
https://github.com/Felix2048/SSM-VLN/tree/25b9f98566d6e29d30e09aa8f96257f5935642d6
|
OutPutBlock
|
import torch
import torch.nn as nn
import torch.nn.functional
class OutPutBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(OutPutBlock, self).__init__()
self.in_chns = in_channels
self.out_chns = out_channels
self.conv1 = nn.Conv2d(self.in_chns, self.in_chns // 2, kernel_size
=1, padding=0)
self.conv2 = nn.Conv2d(self.in_chns // 2, self.out_chns,
kernel_size=1, padding=0)
self.drop1 = nn.Dropout2d(0.3)
self.drop2 = nn.Dropout2d(0.3)
self.ac1 = nn.LeakyReLU()
def forward(self, x):
x = self.drop1(x)
x = self.conv1(x)
x = self.ac1(x)
x = self.drop2(x)
x = self.conv2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
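# Editor's note: a hedged usage sketch added for illustration; it is not part of the
# original module. OutPutBlock is a 1x1-convolution head: Dropout2d -> Conv2d(C, C//2)
# -> LeakyReLU -> Dropout2d -> Conv2d(C//2, out). In eval() mode the dropouts are
# identities, which is consistent with the compiled graph below containing no dropout
# kernel.
def _output_block_example():
    head = OutPutBlock(in_channels=4, out_channels=4).eval()
    x, = get_inputs()  # [4, 4, 4, 4]
    return head(x)     # [4, 4, 4, 4]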
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.functional
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 2
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (2,), (1,))
assert_size_stride(primals_4, (4, 2, 1, 1), (2, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 2, 4, 4), (32, 16, 4, 1))
buf1 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(128)](buf0,
primals_3, buf1, buf2, 128, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del primals_3
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_1[grid(256)](buf4, primals_5, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
return buf4, primals_1, primals_2, primals_4, buf1, buf2
class OutPutBlockNew(nn.Module):
def __init__(self, in_channels, out_channels):
super(OutPutBlockNew, self).__init__()
self.in_chns = in_channels
self.out_chns = out_channels
self.conv1 = nn.Conv2d(self.in_chns, self.in_chns // 2, kernel_size
=1, padding=0)
self.conv2 = nn.Conv2d(self.in_chns // 2, self.out_chns,
kernel_size=1, padding=0)
self.drop1 = nn.Dropout2d(0.3)
self.drop2 = nn.Dropout2d(0.3)
self.ac1 = nn.LeakyReLU()
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
Luoxd1996/awesome-semi-supervised-learning-for-medical-image-segmentation
|
OutPutBlock
| false
| 17,709
|
[
"MIT"
] | 6
|
34d78f41e4fa5927b03cb9f9b2fd473cd16f5e57
|
https://github.com/Luoxd1996/awesome-semi-supervised-learning-for-medical-image-segmentation/tree/34d78f41e4fa5927b03cb9f9b2fd473cd16f5e57
|
CausalConv1d
|
import torch
import torch.nn as nn
class CausalConv1d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=2, dilation=2):
super().__init__()
self.padding = dilation
self.causal_conv = nn.Conv1d(in_channels, out_channels, kernel_size,
padding=self.padding, dilation=dilation)
def forward(self, minibatch):
return self.causal_conv(minibatch)[:, :, :-self.padding]
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
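# Editor's note: a hedged illustration added by the editor; it is not part of the
# original module. With kernel_size=2, padding=dilation equals the
# (kernel_size - 1) * dilation left-padding a causal convolution needs; because
# nn.Conv1d pads both sides, forward() slices off the trailing self.padding timesteps
# so that output position t depends only on inputs at positions <= t.
def _causal_conv_example():
    conv = CausalConv1d(in_channels=4, out_channels=4)  # kernel_size=2, dilation=2
    x, = get_inputs()  # [4, 4, 4]; the padded conv yields length 6, trimmed back to 4
    return conv(x)     # [4, 4, 4]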
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 96
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 6 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 2), (8, 2, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,),
padding=(2,), dilation=(2,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 6), (24, 6, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(96)](buf1, primals_2, 96,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return reinterpret_tensor(buf1, (4, 4, 4), (24, 6, 1), 0
), primals_1, primals_3
class CausalConv1dNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=2, dilation=2):
super().__init__()
self.padding = dilation
self.causal_conv = nn.Conv1d(in_channels, out_channels, kernel_size,
padding=self.padding, dilation=dilation)
def forward(self, input_0):
primals_1 = self.causal_conv.weight
primals_2 = self.causal_conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Hao-Kailong/DisFeb
|
CausalConv1d
| false
| 518
|
[
"MIT"
] | 0
|
2877edd587556e127d6648ee211ed22838c8d015
|
https://github.com/Hao-Kailong/DisFeb/tree/2877edd587556e127d6648ee211ed22838c8d015
|
MultiHeadedAttention
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/3u/c3ui6pflkqqmwiicu3k3k6nxfn3zxrzgar4nyb7sxfkreg6ab7we.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => div, exp, sum_1
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 0.5), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_0 = async_compile.triton('triton_per_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, float("-inf"))
tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
tmp7 = tmp2 - tmp6
tmp8 = 0.5
tmp9 = tmp7 * tmp8
tmp10 = tl_math.exp(tmp9)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tmp15 = tmp10 / tmp14
tl.store(out_ptr2 + (r1 + (16*x0)), tmp15, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 16), out=buf1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 32), out=buf2)
del primals_2
buf3 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf2, (4, 16, 4), (64, 4, 1), 0), reinterpret_tensor(buf1, (4, 4, 16), (64, 1, 4), 0), out=buf3)
buf6 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_per_fused__softmax_0.run(buf3, buf6, 64, 16, grid=grid(64), stream=stream0)
del buf3
buf7 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attended], Original ATen: [aten.bmm]
extern_kernels.bmm(buf6, reinterpret_tensor(buf0, (4, 16, 4), (64, 4, 1), 0), out=buf7)
buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf7, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf8)
return (reinterpret_tensor(buf8, (4, 16, 4), (64, 4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), buf6, reinterpret_tensor(buf7, (64, 4), (4, 1), 0), primals_5, reinterpret_tensor(buf0, (4, 4, 16), (64, 1, 4), 0), reinterpret_tensor(buf2, (4, 4, 16), (64, 1, 4), 0), reinterpret_tensor(buf1, (4, 16, 4), (64, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__softmax_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, float('-inf'))
tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
tmp7 = tmp2 - tmp6
tmp8 = 0.5
tmp9 = tmp7 * tmp8
tmp10 = tl_math.exp(tmp9)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tmp15 = tmp10 / tmp14
tl.store(out_ptr2 + (r1 + 16 * x0), tmp15, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 16), out=buf1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 32), out=buf2)
del primals_2
buf3 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf2, (4, 16, 4), (64, 4, 1),
0), reinterpret_tensor(buf1, (4, 4, 16), (64, 1, 4), 0), out=buf3)
buf6 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__softmax_0[grid(64)](buf3, buf6, 64, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del buf3
buf7 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
extern_kernels.bmm(buf6, reinterpret_tensor(buf0, (4, 16, 4), (64,
4, 1), 0), out=buf7)
buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf8)
return reinterpret_tensor(buf8, (4, 16, 4), (64, 4, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_4, (64, 4), (4, 1), 0
), buf6, reinterpret_tensor(buf7, (64, 4), (4, 1), 0
), primals_5, reinterpret_tensor(buf0, (4, 4, 16), (64, 1, 4), 0
), reinterpret_tensor(buf2, (4, 4, 16), (64, 1, 4), 0
), reinterpret_tensor(buf1, (4, 16, 4), (64, 4, 1), 0)
def same_tensor(tensor, *args):
""" Do the input tensors all point to the same underlying data """
for other in args:
if not torch.is_tensor(other):
return False
if tensor.device != other.device:
return False
if tensor.dtype != other.dtype:
return False
if tensor.data_ptr() != other.data_ptr():
return False
return True
class MultiHeadedAttentionNew(nn.Module):
""" Implement a multi-headed attention module """
def __init__(self, embed_dim, num_heads=1):
""" Initialize the attention module """
super(MultiHeadedAttentionNew, self).__init__()
assert embed_dim % num_heads == 0, f'num_heads={num_heads} should evenly divide embed_dim={embed_dim}'
self.embed_dim = embed_dim
self.num_heads = num_heads
self.projection_dim = embed_dim // num_heads
self.scale = self.projection_dim ** -0.5
self.input_weights = nn.Parameter(torch.Tensor(3 * embed_dim,
embed_dim))
self.output_projection = nn.Linear(embed_dim, embed_dim, bias=False)
self.reset_parameters()
def reset_parameters(self):
""" Reset parameters using xavier initialization """
gain = nn.init.calculate_gain('linear')
nn.init.xavier_uniform_(self.input_weights, gain)
nn.init.xavier_uniform_(self.output_projection.weight, gain)
def project(self, inputs, index=0, chunks=1):
""" Produce a linear projection using the weights """
batch_size = inputs.shape[0]
start = index * self.embed_dim
end = start + chunks * self.embed_dim
projections = F.linear(inputs, self.input_weights[start:end]).chunk(
chunks, dim=-1)
output_projections = []
for projection in projections:
output_projections.append(projection.view(batch_size, -1, self.
num_heads, self.projection_dim).transpose(2, 1).contiguous(
).view(batch_size * self.num_heads, -1, self.projection_dim))
return output_projections
def attention(self, values, keys, queries, key_mask=None, mask=None):
""" Scaled dot product attention with optional masks """
logits = self.scale * torch.bmm(queries, keys.transpose(2, 1))
if mask is not None:
logits += mask
if key_mask is not None:
logits_shape = logits.shape
batch_size = logits_shape[0] // self.num_heads
logits = logits.view(batch_size, self.num_heads, logits_shape[1
], logits_shape[2])
logits.masked_fill_(key_mask[:, None, None], float('-inf'))
logits = logits.view(logits_shape)
attended = torch.bmm(F.softmax(logits, dim=-1), values)
batch_size = queries.shape[0] // self.num_heads
return attended.view(batch_size, self.num_heads, -1, self.
projection_dim).transpose(2, 1).contiguous().view(batch_size, -
1, self.num_heads * self.projection_dim)
def forward(self, input_0, input_1, input_2):
primals_2 = self.input_weights
primals_5 = self.output_projection.weight
primals_1 = input_0
primals_3 = input_1
primals_4 = input_2
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
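# Editor's note: a hedged usage sketch added for illustration; it is not part of the
# original repository code. The compiled call() above specialises the single-head
# case: the packed input_weights of shape (3 * embed_dim, embed_dim) are split at row
# offsets 0 / embed_dim / 2 * embed_dim, and in this graph the three inputs act as the
# values, keys and queries respectively; the softmax kernel folds in the
# 1 / sqrt(projection_dim) = 0.5 scale, and the result is projected with
# output_projection.weight. A CUDA device is assumed.
def _multi_head_attention_example():
    attn = MultiHeadedAttentionNew(embed_dim=4, num_heads=1).cuda()
    values = torch.rand(4, 4, 4, 4, device='cuda')
    keys = torch.rand(4, 4, 4, 4, device='cuda')
    queries = torch.rand(4, 4, 4, 4, device='cuda')
    return attn(values, keys, queries)  # [4, 16, 4], matching the benchmark shapes above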
|
dojoteef/synst
|
MultiHeadedAttention
| false
| 15,199
|
[
"BSD-3-Clause"
] | 81
|
a1842682cf757e8a501cd9cee16f20e1a14158f1
|
https://github.com/dojoteef/synst/tree/a1842682cf757e8a501cd9cee16f20e1a14158f1
|
BasicBlock
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/yw/cywcz4pxnzyvlsoydzxcj5pzlu3i5g7qgj7guhgyvlrzkngzehmv.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# out_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/62/c62vdyzlu3lvskzid3jo7oiwnwhbmrkav2u5qcx2zjpp72hnxkny.py
# Topologically Sorted Source Nodes: [out_3, out_4], Original ATen: [aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_3 => add
# out_4 => relu_1
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %primals_1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_add_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_add_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2; del buf2 # reuse
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_3, out_4], Original ATen: [aten.add, aten.relu, aten.threshold_backward]
triton_poi_fused_add_relu_threshold_backward_1.run(buf3, primals_1, buf4, 256, grid=grid(256), stream=stream0)
return (buf3, primals_1, primals_2, primals_3, buf1, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](buf1, 256, XBLOCK=128, num_warps
=4, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_relu_threshold_backward_1[grid(256)](buf3,
primals_1, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1)
return buf3, primals_1, primals_2, primals_3, buf1, buf4
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlockNew(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlockNew, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.downsample = downsample
self.stride = stride
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv2.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
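# Editor's note: a hedged usage sketch added for illustration; it is not part of the
# original repository code. In this compiled specialisation the block computes
# out = relu(conv2(relu(conv1(x))) + x) with bias-free 3x3 convolutions and no batch
# norm; the second fused kernel above also stores the (out <= 0) mask that autograd
# needs for the ReLU backward. The identity shortcut requires inplanes == planes and
# stride == 1. A CUDA device is assumed.
def _basic_block_example():
    block = BasicBlockNew(inplanes=4, planes=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    return block(x)  # same shape as x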
|
ZephyrII/competitive_colaboration
|
BasicBlock
| false
| 14,719
|
[
"MIT"
] | 357
|
a557d1e23ef2c0b8e3794f085a79bfffb860f9df
|
https://github.com/ZephyrII/competitive_colaboration/tree/a557d1e23ef2c0b8e3794f085a79bfffb860f9df
|
Attention
|
import torch
import torch.nn.functional as F
import torch.nn as nn
class Attention(nn.Module):
"""
Computing the attention over the words
"""
def __init__(self, input_dim, proj_dim):
super(Attention, self).__init__()
self.input_dim = input_dim
self.proj_dim = proj_dim
self.head = nn.Parameter(torch.Tensor(proj_dim, 1).uniform_(-0.1, 0.1))
self.proj = nn.Linear(input_dim, proj_dim)
def forward(self, input, input_mask):
"""
input: batch, max_text_len, input_dim
input_mask: batch, max_text_len
"""
batch, max_input_len, _input_dim = input.size()
proj_input = torch.tanh(self.proj(input.view(batch * max_input_len,
-1)))
att = torch.mm(proj_input, self.head)
att = att.view(batch, max_input_len, 1)
log_att = F.log_softmax(att, dim=1)
att = F.softmax(att, dim=1)
output = input * att * input_mask.unsqueeze(-1).detach()
output = output.sum(dim=1)
return output, att.squeeze(2), log_att.squeeze(2)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'proj_dim': 4}]
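# Editor's note: a hedged usage sketch added for illustration; it is not part of the
# original module. The attention head scores every position t with
# head^T tanh(proj(input_t)), softmaxes the scores over the length dimension, and
# returns the mask-weighted sum of the inputs together with the attention and
# log-attention weights.
def _attention_example():
    att_module = Attention(input_dim=4, proj_dim=4)
    x, mask = get_inputs()  # the synthetic test tensors defined above
    output, att, log_att = att_module(x, mask)
    return output, att, log_att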
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax__log_softmax_backward_data__softmax_2(in_ptr0
, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp14 = tl_math.exp(tmp0)
tmp15 = tmp14 / tmp11
tmp16 = tl_math.exp(tmp13)
tl.store(out_ptr0 + x2, tmp13, xmask)
tl.store(out_ptr1 + x2, tmp15, xmask)
tl.store(out_ptr2 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 64
x1 = xindex // 4 % 16
x2 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr2 + (16 + x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr2 + (32 + x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr2 + (48 + x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp2 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp2 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp2 * tmp11
tmp13 = tmp10 + tmp12
tl.store(out_ptr0 + x4, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 1), (1, 1))
assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(64)](buf1, primals_3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(buf1, primals_4, out=buf2)
buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused__log_softmax_1[grid(16)](buf2, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4, 1), (4, 1, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused__log_softmax__log_softmax_backward_data__softmax_2[
grid(16)](buf3, buf4, buf5, buf7, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del buf3
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sum_3[grid(256)](primals_1, buf5, primals_5,
buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
return buf6, reinterpret_tensor(buf5, (4, 4), (4, 1), 0
), reinterpret_tensor(buf4, (4, 4), (4, 1), 0
), primals_1, primals_5, buf1, buf5, buf7, reinterpret_tensor(primals_4
, (1, 4), (1, 1), 0)
class AttentionNew(nn.Module):
"""
Computing the attention over the words
"""
def __init__(self, input_dim, proj_dim):
super(AttentionNew, self).__init__()
self.input_dim = input_dim
self.proj_dim = proj_dim
self.head = nn.Parameter(torch.Tensor(proj_dim, 1).uniform_(-0.1, 0.1))
self.proj = nn.Linear(input_dim, proj_dim)
def forward(self, input_0, input_1):
primals_4 = self.head
primals_2 = self.proj.weight
primals_3 = self.proj.bias
primals_1 = input_0
primals_5 = input_1
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1], output[2]
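# Illustrative smoke test (a sketch, not part of the generated module; the
# helper name below is hypothetical and a CUDA device is assumed). Shapes
# follow the assert_size_stride guards in call(): input_0 is the (4, 4, 4)
# word tensor, input_1 the (4, 4, 4, 4) value tensor, and the module returns
# the attention-weighted output plus the attention weights and their
# log-probabilities.
def _attention_smoke_test():
    attn = AttentionNew(input_dim=4, proj_dim=4).cuda()
    words = torch.rand(4, 4, 4, device='cuda')
    values = torch.rand(4, 4, 4, 4, device='cuda')
    out, weights, log_weights = attn(words, values)
    assert out.shape == (4, 4, 4, 4)
    assert weights.shape == (4, 4) and log_weights.shape == (4, 4)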
|
Sein-Jang/R2A
|
Attention
| false
| 5,816
|
[
"MIT"
] | 1
|
f70b69cedb4de3dd60a36963c4b6a881d9d090ee
|
https://github.com/Sein-Jang/R2A/tree/f70b69cedb4de3dd60a36963c4b6a881d9d090ee
|
NRelu
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_9/inductor_cache/eb/ceb2aegliv53sw72ihlcws3ddru7dcvf6dtzxqzjejgol2wdpazv.py
# Topologically Sorted Source Nodes: [neg, relu, neg_1], Original ATen: [aten.neg, aten.relu]
# Source node to ATen node mapping:
# neg => neg
# neg_1 => neg_1
# relu => relu
# Graph fragment:
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%neg,), kwargs = {})
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%relu,), kwargs = {})
triton_poi_fused_neg_relu_0 = async_compile.triton('triton_poi_fused_neg_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_neg_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_neg_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = -tmp0
tmp2 = tl.full([1], 0, tl.int32)
tmp3 = triton_helpers.maximum(tmp2, tmp1)
tmp4 = -tmp3
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [neg, relu, neg_1], Original ATen: [aten.neg, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_neg_relu_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch.nn as nn
import torch.optim
import torch.backends.cudnn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_neg_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = -tmp0
tmp2 = tl.full([1], 0, tl.int32)
tmp3 = triton_helpers.maximum(tmp2, tmp1)
tmp4 = -tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_neg_relu_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class NReluNew(nn.Module):
"""
-max(-x,0)
Parameters
----------
Input shape: (N, C, W, H)
    Output shape: (N, C, W, H)
"""
def __init__(self, inplace):
super(NReluNew, self).__init__()
self.inplace = inplace
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
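# Illustrative check (a sketch, assuming a CUDA device; the helper name is
# hypothetical): the fused kernel computes -relu(-x), i.e. it keeps the
# negative part of x and clamps the positive part to zero, which is the same
# elementwise result as torch.clamp(x, max=0).
def _nrelu_check():
    x = torch.randn(4, 4, 4, 4, device='cuda')
    y = NReluNew(inplace=False)(x)
    assert torch.allclose(y, -torch.relu(-x))
    assert torch.allclose(y, torch.clamp(x, max=0.0))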
|
minhtannguyen/pytorch_shake_shake
|
NRelu
| false
| 12,785
|
[
"MIT"
] | 0
|
d7f245d8d8b9e81a6020aadb438ffeae6d5593c2
|
https://github.com/minhtannguyen/pytorch_shake_shake/tree/d7f245d8d8b9e81a6020aadb438ffeae6d5593c2
|
TransitionUp
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/re/creleqpucbzvzrso3whbekvyjzfafblr33ygekztpuugjz5zfqbd.py
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# out_2 => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_4, %primals_4], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16) % 8
x0 = xindex % 4
x1 = (xindex // 4) % 4
x3 = (xindex // 128)
x4 = xindex % 16
x5 = xindex
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (20 + x0 + (9*x1) + (81*x2) + (324*x3)), tmp4 & xmask, other=0.0)
tmp6 = tl.load(in_ptr1 + (x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 8, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tl.load(in_ptr2 + (x4 + (16*((-4) + x2)) + (64*x3)), tmp10 & xmask, other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + (x5), tmp14, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 9, 9), (324, 81, 9, 1))
buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(buf0, primals_2, primals_4, buf1, 512, grid=grid(512), stream=stream0)
del buf0
del primals_2
del primals_4
return (buf1, primals_1, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16 % 8
x0 = xindex % 4
x1 = xindex // 4 % 4
x3 = xindex // 128
x4 = xindex % 16
x5 = xindex
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (20 + x0 + 9 * x1 + 81 * x2 + 324 * x3), tmp4 &
xmask, other=0.0)
tmp6 = tl.load(in_ptr1 + x2, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp13 = tl.load(in_ptr2 + (x4 + 16 * (-4 + x2) + 64 * x3), tmp10 &
xmask, other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + x5, tmp14, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
2), padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 9, 9), (324, 81, 9, 1))
buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](buf0, primals_2, primals_4, buf1,
512, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
del primals_4
return buf1, primals_1, primals_3
def center_crop(layer, max_height, max_width):
_, _, h, w = layer.size()
xy1 = (w - max_width) // 2
xy2 = (h - max_height) // 2
return layer[:, :, xy2:xy2 + max_height, xy1:xy1 + max_width]
class TransitionUpNew(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.convTrans = nn.ConvTranspose2d(in_channels=in_channels,
out_channels=out_channels, kernel_size=3, stride=2, padding=0,
bias=True)
def forward(self, input_0, input_1):
primals_1 = self.convTrans.weight
primals_2 = self.convTrans.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
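# Illustrative check (a sketch, assuming a CUDA device; the helper name is
# hypothetical): the fused cat kernel center-crops the transposed-conv output
# (9x9 for a 4x4 input with kernel 3, stride 2) back to the skip tensor's
# 4x4 spatial size and concatenates it with the skip tensor along channels,
# matching an eager-mode crop-and-cat around the same ConvTranspose2d.
def _transition_up_check():
    up = TransitionUpNew(in_channels=4, out_channels=4).cuda()
    x = torch.randn(4, 4, 4, 4, device='cuda')
    skip = torch.randn(4, 4, 4, 4, device='cuda')
    out = up(x, skip)
    ref = torch.cat([center_crop(up.convTrans(x), skip.size(2), skip.size(3)), skip], 1)
    assert out.shape == (4, 8, 4, 4)
    assert torch.allclose(out, ref, atol=1e-5)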
|
ELEKTRONN/elektronn3
|
TransitionUp
| false
| 13,641
|
[
"MIT"
] | 124
|
19c751855dffc67b744cd43e757aa4a5bd577d9b
|
https://github.com/ELEKTRONN/elektronn3/tree/19c751855dffc67b744cd43e757aa4a5bd577d9b
|
PDCBlock_converted
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/yw/cywcz4pxnzyvlsoydzxcj5pzlu3i5g7qgj7guhgyvlrzkngzehmv.py
# Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# y_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/43/c43iah2ujzdzlzvirc5zcusvrhdz3liemhgusdpro5bcmzekdxpa.py
# Topologically Sorted Source Nodes: [y_3], Original ATen: [aten.add]
# Source node to ATen node mapping:
# y_3 => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %primals_2), kwargs = {})
triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [y_3], Original ATen: [aten.add]
triton_poi_fused_add_1.run(buf3, primals_2, 256, grid=grid(256), stream=stream0)
return (buf3, primals_1, primals_2, primals_3, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](buf1, 256, XBLOCK=128, num_warps
=4, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_add_1[grid(256)](buf3, primals_2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return buf3, primals_1, primals_2, primals_3, buf1
class PDCBlock_convertedNew(nn.Module):
"""
CPDC, APDC can be converted to vanilla 3x3 convolution
RPDC can be converted to vanilla 5x5 convolution
"""
def __init__(self, pdc, inplane, ouplane, stride=1):
super(PDCBlock_convertedNew, self).__init__()
self.stride = stride
if self.stride > 1:
self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
self.shortcut = nn.Conv2d(inplane, ouplane, kernel_size=1,
padding=0)
if pdc == 'rd':
self.conv1 = nn.Conv2d(inplane, inplane, kernel_size=5, padding
=2, groups=inplane, bias=False)
else:
self.conv1 = nn.Conv2d(inplane, inplane, kernel_size=3, padding
=1, groups=inplane, bias=False)
self.relu2 = nn.ReLU()
self.conv2 = nn.Conv2d(inplane, ouplane, kernel_size=1, padding=0,
bias=False)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_3 = self.conv2.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
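# Illustrative check (a sketch, assuming a CUDA device and the default
# stride=1; the helper name is hypothetical): with stride 1 the block is a
# depthwise 3x3 conv, ReLU, pointwise 1x1 conv and a residual add, which is
# what the extern convolutions plus the two fused kernels above compute.
def _pdcblock_check():
    block = PDCBlock_convertedNew(pdc='cv', inplane=4, ouplane=4).cuda()
    x = torch.randn(4, 4, 4, 4, device='cuda')
    out = block(x)
    ref = block.conv2(block.relu2(block.conv1(x))) + x
    assert torch.allclose(out, ref, atol=1e-5)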
|
mgpadalkar/pidinet
|
PDCBlock_converted
| false
| 16,038
|
[
"MIT"
] | 137
|
781924fe30469cdc64f63ce6666a3e1f5b4e576f
|
https://github.com/mgpadalkar/pidinet/tree/781924fe30469cdc64f63ce6666a3e1f5b4e576f
|
GeLU
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/v7/cv7humnywkkqhrumbeetegqlkretdwtkj5pcanrbgxrolupvobzt.py
# Topologically Sorted Source Nodes: [mul, pow_1, mul_1, add, mul_2, tanh, add_1, mul_3], Original ATen: [aten.mul, aten.pow, aten.add, aten.tanh]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# pow_1 => pow_1
# tanh => tanh
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.5), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 3), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.044715), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %mul_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.7978845608028654), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%mul_2,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add_1), kwargs = {})
triton_poi_fused_add_mul_pow_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_pow_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_pow_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_pow_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
tmp9 = tmp7 * tmp8
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, pow_1, mul_1, add, mul_2, tanh, add_1, mul_3], Original ATen: [aten.mul, aten.pow, aten.add, aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_pow_tanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import Module
import functools
import torch.utils.data
import torch.nn as nn
from torchvision.models import *
import torch.nn.init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_pow_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
tmp9 = tmp7 * tmp8
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_pow_tanh_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class GeLUNew(Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
class PrePostInitMeta(type):
"""A metaclass that calls optional `__pre_init__` and `__post_init__` methods"""
def __new__(cls, name, bases, dct):
x = super().__new__(cls, name, bases, dct)
old_init = x.__init__
def _pass(self):
pass
@functools.wraps(old_init)
def _init(self, *args, **kwargs):
self.__pre_init__()
old_init(self, *args, **kwargs)
self.__post_init__()
x.__init__ = _init
if not hasattr(x, '__pre_init__'):
x.__pre_init__ = _pass
if not hasattr(x, '__post_init__'):
x.__post_init__ = _pass
return x
class Module(nn.Module, metaclass=PrePostInitMeta):
"""Same as `nn.Module`, but no need for subclasses to call `super().__init__`"""
def __pre_init__(self):
super().__init__()
def __init__(self):
pass
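# Illustrative check (a sketch, assuming a CUDA device; the helper name is
# hypothetical): the kernel evaluates the tanh approximation of GELU,
# 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3))), with sqrt(2/pi)
# hard-coded as 0.7978845608028654 above.
def _gelu_check():
    x = torch.randn(4, 4, 4, 4, device='cuda')
    y = GeLUNew()(x)
    ref = 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * (x + 0.044715 * x ** 3)))
    assert torch.allclose(y, ref, atol=1e-6)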
|
JiahuaWU/fastai
|
GeLU
| false
| 13,873
|
[
"Apache-2.0"
] | 59
|
13a2df812d875abf0558004283392ab40d9bdea1
|
https://github.com/JiahuaWU/fastai/tree/13a2df812d875abf0558004283392ab40d9bdea1
|
SeparableConvBlock
|
import math
import torch
import torch.utils.data
import torch.nn.functional as F
from itertools import product as product
from math import sqrt as sqrt
class Conv2dSamePadding(torch.nn.Conv2d):
"""
A wrapper around :class:`torch.nn.Conv2d` to support "SAME" padding mode and more features.
"""
def __init__(self, *args, **kwargs):
"""
Extra keyword arguments supported in addition to those in `torch.nn.Conv2d`:
Args:
norm (nn.Module, optional): a normalization layer
activation (callable(Tensor) -> Tensor): a callable activation function
            It assumes that the norm layer is applied before the activation.
"""
norm = kwargs.pop('norm', None)
activation = kwargs.pop('activation', None)
self.padding_method = kwargs.pop('padding', None)
if self.padding_method is None:
if len(args) >= 5:
self.padding_method = args[4]
else:
self.padding_method = 0
if isinstance(self.padding_method, str):
if self.padding_method.upper() == 'SAME':
super().__init__(*args, **kwargs, padding=0)
if isinstance(self.stride, int):
self.stride = [self.stride] * 2
elif len(self.stride) == 1:
self.stride = [self.stride[0]] * 2
if isinstance(self.kernel_size, int):
self.kernel_size = [self.kernel_size] * 2
elif len(self.kernel_size) == 1:
self.kernel_size = [self.kernel_size[0]] * 2
if isinstance(self.dilation, int):
self.dilation = [self.dilation] * 2
elif len(self.dilation) == 1:
self.dilation = [self.dilation[0]] * 2
else:
raise ValueError('Unknown padding method: {}'.format(self.
padding_method))
else:
super().__init__(*args, **kwargs, padding=self.padding_method)
self.norm = norm
self.activation = activation
def forward(self, x):
if isinstance(self.padding_method, str):
if self.padding_method.upper() == 'SAME':
input_h, input_w = x.shape[-2:]
stride_h, stride_w = self.stride
kernel_size_h, kernel_size_w = self.kernel_size
dilation_h, dilation_w = self.dilation
output_h = math.ceil(input_h / stride_h)
output_w = math.ceil(input_w / stride_w)
padding_needed_h = max(0, (output_h - 1) * stride_h + (
kernel_size_h - 1) * dilation_h + 1 - input_h)
padding_needed_w = max(0, (output_w - 1) * stride_w + (
kernel_size_w - 1) * dilation_w + 1 - input_w)
left = padding_needed_w // 2
right = padding_needed_w - left
top = padding_needed_h // 2
bottom = padding_needed_h - top
x = F.pad(x, [left, right, top, bottom])
else:
raise ValueError('Unknown padding method: {}'.format(self.
padding_method))
x = super().forward(x)
if self.norm is not None:
x = self.norm(x)
if self.activation is not None:
x = self.activation(x)
return x
class SeparableConvBlock(torch.nn.Module):
"""
    Depthwise separable convolution block.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, bias=True, norm=None, activation=None):
"""
Args:
in_channels (int): the number of input tensor channels.
out_channels (int):the number of output tensor channels.
kernel_size (int): the kernel size.
stride (int or tuple or list): the stride.
bias (bool): if `True`, the pointwise conv applies bias.
apply_bn (bool): if `True`, apply BN layer after conv layer.
norm (nn.Module, optional): a normalization layer
activation (callable(Tensor) -> Tensor): a callable activation function
            It assumes that the norm layer is applied before the activation.
"""
super(SeparableConvBlock, self).__init__()
self.norm = norm
self.activation = activation
self.depthwise = Conv2dSamePadding(in_channels=in_channels,
out_channels=in_channels, kernel_size=kernel_size, stride=
stride, padding=padding, dilation=dilation, groups=in_channels,
bias=False)
self.pointwise = Conv2dSamePadding(in_channels=in_channels,
out_channels=out_channels, kernel_size=1, stride=1, padding=0,
dilation=1, groups=1, bias=bias)
if bias:
self.bias = self.pointwise.bias
def forward(self, inputs):
x = self.depthwise(inputs)
x = self.pointwise(x)
if self.norm is not None:
x = self.norm(x)
if self.activation is not None:
x = self.activation(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.utils.data
import torch.nn.functional as F
from itertools import product as product
from math import sqrt as sqrt
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16)](buf2, primals_4, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_4
return buf2, primals_1, primals_2, primals_3, buf0
class Conv2dSamePadding(torch.nn.Conv2d):
"""
A wrapper around :class:`torch.nn.Conv2d` to support "SAME" padding mode and more features.
"""
def __init__(self, *args, **kwargs):
"""
Extra keyword arguments supported in addition to those in `torch.nn.Conv2d`:
Args:
norm (nn.Module, optional): a normalization layer
activation (callable(Tensor) -> Tensor): a callable activation function
            It assumes that the norm layer is applied before the activation.
"""
norm = kwargs.pop('norm', None)
activation = kwargs.pop('activation', None)
self.padding_method = kwargs.pop('padding', None)
if self.padding_method is None:
if len(args) >= 5:
self.padding_method = args[4]
else:
self.padding_method = 0
if isinstance(self.padding_method, str):
if self.padding_method.upper() == 'SAME':
super().__init__(*args, **kwargs, padding=0)
if isinstance(self.stride, int):
self.stride = [self.stride] * 2
elif len(self.stride) == 1:
self.stride = [self.stride[0]] * 2
if isinstance(self.kernel_size, int):
self.kernel_size = [self.kernel_size] * 2
elif len(self.kernel_size) == 1:
self.kernel_size = [self.kernel_size[0]] * 2
if isinstance(self.dilation, int):
self.dilation = [self.dilation] * 2
elif len(self.dilation) == 1:
self.dilation = [self.dilation[0]] * 2
else:
raise ValueError('Unknown padding method: {}'.format(self.
padding_method))
else:
super().__init__(*args, **kwargs, padding=self.padding_method)
self.norm = norm
self.activation = activation
def forward(self, x):
if isinstance(self.padding_method, str):
if self.padding_method.upper() == 'SAME':
input_h, input_w = x.shape[-2:]
stride_h, stride_w = self.stride
kernel_size_h, kernel_size_w = self.kernel_size
dilation_h, dilation_w = self.dilation
output_h = math.ceil(input_h / stride_h)
output_w = math.ceil(input_w / stride_w)
padding_needed_h = max(0, (output_h - 1) * stride_h + (
kernel_size_h - 1) * dilation_h + 1 - input_h)
padding_needed_w = max(0, (output_w - 1) * stride_w + (
kernel_size_w - 1) * dilation_w + 1 - input_w)
left = padding_needed_w // 2
right = padding_needed_w - left
top = padding_needed_h // 2
bottom = padding_needed_h - top
x = F.pad(x, [left, right, top, bottom])
else:
raise ValueError('Unknown padding method: {}'.format(self.
padding_method))
x = super().forward(x)
if self.norm is not None:
x = self.norm(x)
if self.activation is not None:
x = self.activation(x)
return x
class SeparableConvBlockNew(torch.nn.Module):
"""
    Depthwise separable convolution block.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, bias=True, norm=None, activation=None):
"""
Args:
in_channels (int): the number of input tensor channels.
out_channels (int):the number of output tensor channels.
kernel_size (int): the kernel size.
stride (int or tuple or list): the stride.
bias (bool): if `True`, the pointwise conv applies bias.
apply_bn (bool): if `True`, apply BN layer after conv layer.
norm (nn.Module, optional): a normalization layer
activation (callable(Tensor) -> Tensor): a callable activation function
            It assumes that the norm layer is applied before the activation.
"""
super(SeparableConvBlockNew, self).__init__()
self.norm = norm
self.activation = activation
self.depthwise = Conv2dSamePadding(in_channels=in_channels,
out_channels=in_channels, kernel_size=kernel_size, stride=
stride, padding=padding, dilation=dilation, groups=in_channels,
bias=False)
self.pointwise = Conv2dSamePadding(in_channels=in_channels,
out_channels=out_channels, kernel_size=1, stride=1, padding=0,
dilation=1, groups=1, bias=bias)
if bias:
self.bias = self.pointwise.bias
def forward(self, input_0):
primals_4 = self.bias
primals_1 = self.depthwise.weight
primals_3 = self.pointwise.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
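# Illustrative check (a sketch, assuming a CUDA device; the helper name is
# hypothetical): with kernel_size=4 and no padding a 4x4 input collapses to
# 1x1 spatially, and the block reduces to a depthwise conv followed by a
# pointwise 1x1 conv whose bias is added by the fused kernel above.
def _separable_conv_check():
    block = SeparableConvBlockNew(in_channels=4, out_channels=4, kernel_size=4).cuda()
    x = torch.randn(4, 4, 4, 4, device='cuda')
    out = block(x)
    ref = block.pointwise(block.depthwise(x))
    assert out.shape == (4, 4, 1, 1)
    assert torch.allclose(out, ref, atol=1e-5)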
|
lingtengqiu/LearnableTreeFilterV2
|
SeparableConvBlock
| false
| 7,098
|
[
"Apache-2.0"
] | 1
|
3814a5a84c0a5c33d6538749eaf5aed4827366de
|
https://github.com/lingtengqiu/LearnableTreeFilterV2/tree/3814a5a84c0a5c33d6538749eaf5aed4827366de
|
FourierEmbedding
|
import torch
from torch import nn
class FourierEmbedding(nn.Module):
def __init__(self, features, height, width, **kwargs):
super().__init__(**kwargs)
self.projector = nn.Linear(2, features)
self._height = height
self._width = width
def forward(self, y, x):
x_norm = 2 * x / (self._width - 1) - 1
y_norm = 2 * y / (self._height - 1) - 1
z = torch.cat((x_norm.unsqueeze(2), y_norm.unsqueeze(2)), dim=2)
return torch.sin(self.projector(z))
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'features': 4, 'height': 4, 'width': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = 2.0
tmp7 = tmp5 * tmp6
tmp8 = 0.3333333333333333
tmp9 = tmp7 * tmp8
tmp10 = 1.0
tmp11 = tmp9 - tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp17 = tl.load(in_ptr1 + x1, tmp14 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp18 = tmp17 * tmp6
tmp19 = tmp18 * tmp8
tmp20 = tmp19 - tmp10
tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype)
tmp22 = tl.where(tmp14, tmp20, tmp21)
tmp23 = tl.where(tmp4, tmp13, tmp22)
tl.store(out_ptr0 + x2, tmp23, xmask)
@triton.jit
def triton_poi_fused_sin_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl_math.sin(tmp0)
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 2), (2, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (16, 2), (
2, 1), 0), reinterpret_tensor(primals_3, (2, 4), (1, 2), 0),
alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_sin_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
return buf2, reinterpret_tensor(buf0, (16, 2), (2, 1), 0), buf1
class FourierEmbeddingNew(nn.Module):
def __init__(self, features, height, width, **kwargs):
super().__init__(**kwargs)
self.projector = nn.Linear(2, features)
self._height = height
self._width = width
def forward(self, input_0, input_1):
primals_3 = self.projector.weight
primals_4 = self.projector.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
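# Illustrative check (a sketch, assuming a CUDA device; the helper name is
# hypothetical): the fused cat kernel normalizes both coordinate tensors to
# [-1, 1] (the 0.3333... constant is 2 / (width - 1) for width = height = 4),
# projects the coordinate pairs through the Linear(2, features) layer and
# applies sin, so the (4, 4, features) output lies in [-1, 1].
def _fourier_embedding_check():
    emb = FourierEmbeddingNew(features=4, height=4, width=4).cuda()
    y = torch.rand(4, 4, device='cuda')
    x = torch.rand(4, 4, device='cuda')
    out = emb(y, x)
    assert out.shape == (4, 4, 4)
    assert out.abs().max().item() <= 1.0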
|
LS4GAN/uvcgan
|
FourierEmbedding
| false
| 8,419
|
[
"BSD-2-Clause"
] | 20
|
376439ae2a9be684ff279ddf634fe137aadc5df5
|
https://github.com/LS4GAN/uvcgan/tree/376439ae2a9be684ff279ddf634fe137aadc5df5
|
ScaledDotProductAttention
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_8/inductor_cache/4q/c4qoh645afcunrhaa5xye6sbkw2mzzlvmntdpffld4732bbjzx7o.py
# Topologically Sorted Source Nodes: [scaling_factor, attention], Original ATen: [aten.sqrt, aten._softmax]
# Source node to ATen node mapping:
# attention => exp
# scaling_factor => full_default
# Graph fragment:
# %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %ge_scalar : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%full_default, 0), kwargs = {})
# %scalar_tensor_default : [num_users=2] = call_function[target=torch.ops.aten.scalar_tensor.default](args = (1,), kwargs = {dtype: torch.float32, device: cuda:0, pin_memory: False})
# %neg_default : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%scalar_tensor_default,), kwargs = {})
# %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%ge_scalar, %scalar_tensor_default, %neg_default), kwargs = {})
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, %where_self), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [2], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_self, %full_default), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, %mul_tensor_1), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
triton_poi_fused__softmax_sqrt_0 = async_compile.triton('triton_poi_fused__softmax_sqrt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_sqrt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp8 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 2.0
tmp2 = 0.0
tmp3 = tmp1 >= tmp2
tmp4 = 1.0
tmp5 = -1.0
tmp6 = tl.where(tmp3, tmp4, tmp5)
tmp7 = tmp0 * tmp6
tmp9 = tmp8 * tmp6
tmp11 = tmp10 * tmp6
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tmp13 * tmp6
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp17 = tmp16 * tmp6
tmp18 = triton_helpers.maximum(tmp15, tmp17)
tmp19 = tmp7 - tmp18
tmp20 = tmp6 * tmp1
tmp21 = tmp19 / tmp20
tmp22 = tl_math.exp(tmp21)
tl.store(out_ptr0 + (x2), tmp22, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_8/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [scaling_factor, attention], Original ATen: [aten.sqrt, aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_sqrt_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(buf2, arg2_1, out=buf3)
del arg2_1
return (buf3, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp8 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 2.0
tmp2 = 0.0
tmp3 = tmp1 >= tmp2
tmp4 = 1.0
tmp5 = -1.0
tmp6 = tl.where(tmp3, tmp4, tmp5)
tmp7 = tmp0 * tmp6
tmp9 = tmp8 * tmp6
tmp11 = tmp10 * tmp6
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tmp13 * tmp6
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp17 = tmp16 * tmp6
tmp18 = triton_helpers.maximum(tmp15, tmp17)
tmp19 = tmp7 - tmp18
tmp20 = tmp6 * tmp1
tmp21 = tmp19 / tmp20
tmp22 = tl_math.exp(tmp21)
tl.store(out_ptr0 + x2, tmp22, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (
16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_sqrt_0[grid(64)](buf0, buf1, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = buf1
del buf1
extern_kernels.bmm(buf2, arg2_1, out=buf3)
del arg2_1
return buf3, buf2
class ScaledDotProductAttentionNew(nn.Module):
"""
Attention mechanisms usually scale values based on relationships between
keys and queries.
Attention(Q,K,V) = A(Q,K)*V where A() is a normalization function.
A common choice for the normalization function is scaled dot-product attention:
A(Q,K) = Softmax(Q*K^T / sqrt(d_attention))
(a minimal eager-mode sketch of this computation follows the class below)
Args:
dropout (float): Fraction between 0 and 1 corresponding to the degree of dropout used
"""
def __init__(self, dropout=0.0):
super().__init__()
self.dropout = nn.Dropout(dropout)
self.softmax = nn.Softmax(dim=2)
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1]
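# A minimal eager-mode sketch of the same computation, for reference only
# (illustration, not part of the repository). With the argument order used by
# call() above -- input_1 as queries, input_0 as keys, input_2 as values --
# this should match the compiled output up to floating-point error.
import math

def eager_scaled_dot_product_attention(keys, queries, values):
    # A(Q, K) = Softmax(Q @ K^T / sqrt(d_attention)); output = A(Q, K) @ V
    d_attention = queries.shape[-1]
    scores = torch.bmm(queries, keys.transpose(1, 2)) / math.sqrt(d_attention)
    attention = torch.softmax(scores, dim=2)
    return torch.bmm(attention, values), attention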
repo_name: KalleBylin/tft_webapp
module_name: ScaledDotProductAttention
synthetic: false
uuid: 9,227
licenses: ["Apache-2.0"]
stars: 0
sha: 008f109e77f8bada417655dab482f340adb8cb6b
repo_link: https://github.com/KalleBylin/tft_webapp/tree/008f109e77f8bada417655dab482f340adb8cb6b
entry_point: LearnedPositionalEmbeddings
from torch.nn import Module
import torch
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd
class LearnedPositionalEmbeddings(Module):
"""
<a id="LearnedPositionalEmbeddings"></a>
## Add parameterized positional encodings
This adds learned positional embeddings to patch embeddings.
"""
def __init__(self, d_model: 'int', max_len: 'int'=5000):
"""
* `d_model` is the transformer embeddings size
* `max_len` is the maximum number of patches
"""
super().__init__()
self.positional_encodings = nn.Parameter(torch.zeros(max_len, 1,
d_model), requires_grad=True)
def forward(self, x: 'torch.Tensor'):
"""
* `x` is the patch embeddings of shape `[patches, batch_size, d_model]`
"""
pe = self.positional_encodings[x.shape[0]]
return x + pe
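# Hypothetical usage sketch (illustration only, not from the original source):
# patch embeddings shaped [patches, batch_size, d_model] receive a learned
# positional offset that is trained together with the rest of the model.
def _positional_embedding_usage_example():
    emb = LearnedPositionalEmbeddings(d_model=4)
    patches = torch.rand(3, 2, 4)  # [patches, batch_size, d_model]
    return emb(patches)  # same shape as the input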
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + (16 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (5000, 1, 4), (4, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](primals_2, primals_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf0,
class LearnedPositionalEmbeddingsNew(Module):
"""
<a id="LearnedPositionalEmbeddings"></a>
## Add parameterized positional encodings
This adds learned positional embeddings to patch embeddings.
"""
def __init__(self, d_model: 'int', max_len: 'int'=5000):
"""
* `d_model` is the transformer embeddings size
* `max_len` is the maximum number of patches
"""
super().__init__()
self.positional_encodings = nn.Parameter(torch.zeros(max_len, 1,
d_model), requires_grad=True)
def forward(self, input_0):
primals_1 = self.positional_encodings
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
repo_name: mcx/annotated_deep_learning_paper_implementations
module_name: LearnedPositionalEmbeddings
synthetic: false
uuid: 7,203
licenses: ["MIT"]
stars: 1
sha: f169f3a71dd2d36eb28ad31062d3475efa367b88
repo_link: https://github.com/mcx/annotated_deep_learning_paper_implementations/tree/f169f3a71dd2d36eb28ad31062d3475efa367b88
entry_point: Model
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/uu/cuupb7yo7ai64qlwi4lpnlvdf3gw6jlp543potvhemy2bbjwrt5a.py
# Topologically Sorted Source Nodes: [mul, mul_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# mul_1 => mul_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %primals_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_3), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1073741824],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1073741824
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 16777216
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (16777216, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 16777216), (268435456, 67108864, 16777216, 1))
assert_size_stride(primals_3, (16777216, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 16777216), (268435456, 67108864, 16777216, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, mul_1], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_2, primals_1, primals_3, buf0, 1073741824, grid=grid(1073741824), stream=stream0)
return (buf0, primals_1, primals_2, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16777216, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 16777216), (268435456, 67108864, 16777216, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((16777216, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
import torch.nn.functional
from torch.nn.parameter import Parameter
from torch.nn.modules import Module
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from torch.nn import Parameter
from torch.nn import Module
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 16777216
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x2, tmp4, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (16777216,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 16777216), (268435456, 67108864,
16777216, 1))
assert_size_stride(primals_3, (16777216,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 16777216), (268435456, 67108864,
16777216, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(1073741824)](primals_2, primals_1,
primals_3, buf0, 1073741824, XBLOCK=1024, num_warps=4, num_stages=1
)
return buf0, primals_1, primals_2, primals_3
class ModelNew(Module):
def __init__(self):
super(ModelNew, self).__init__()
self.a = Parameter(torch.FloatTensor(4096 * 4096).fill_(1.0))
self.b = Parameter(torch.FloatTensor(4096 * 4096).fill_(2.0))
def forward(self, input_0):
primals_1 = self.a
primals_3 = self.b
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
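# For reference, an eager-mode sketch of what the fused kernel computes
# (illustration only): x * a * b, with both parameters broadcasting along the
# trailing dimension of 4096 * 4096 = 16,777,216 elements, so the grid launch
# above covers 4 * 4 * 4 * 16,777,216 = 1,073,741,824 elements in total.
def eager_model_forward(model: ModelNew, x: torch.Tensor) -> torch.Tensor:
    return x * model.a * model.b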
repo_name: Jovonni/jukebox
module_name: Model
synthetic: false
uuid: 1,908
licenses: ["MIT"]
stars: 0
sha: 965a6f78aae67506a6e4fcdb205e2c39132e12e0
repo_link: https://github.com/Jovonni/jukebox/tree/965a6f78aae67506a6e4fcdb205e2c39132e12e0