| entry_point (string, 1-65 chars) | original_triton_python_code (string, 208-619k chars) | optimised_triton_code (string, 1.15k-275k chars) | repo_name (string, 7-115 chars) | module_name (string, 1-65 chars) | synthetic (bool, 1 class) | uuid (int64, 0-18.5k) | licenses (list, 1-6 items) | stars (int64, 0-19.8k) | sha (string, 40 chars) | repo_link (string, 72-180 chars) |
|---|---|---|---|---|---|---|---|---|---|---|
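Each row below pairs a PyTorch reference implementation (original_triton_python_code) with the code that PyTorch Inductor emitted for it (optimised_triton_code), plus repository metadata. A minimal consumption sketch with the Hugging Face datasets library follows; the file name triton_pairs.jsonl is a hypothetical local export of this table, not a path given on this page.
# Hypothetical loading sketch -- assumes the table was exported to JSON Lines.
from datasets import load_dataset
ds = load_dataset("json", data_files="triton_pairs.jsonl", split="train")
row = ds[0]
print(row["entry_point"], row["repo_name"], row["stars"], row["licenses"])
# Both code columns are plain Python source strings and can be inspected directly.
print(row["original_triton_python_code"][:200])
print(row["optimised_triton_code"][:200])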
InverseDepthSmoothnessLoss
|
import torch
import torch.nn as nn
def _gradient_x(img: 'torch.Tensor') ->torch.Tensor:
assert len(img.shape) == 4, img.shape
return img[:, :, :, :-1] - img[:, :, :, 1:]
def _gradient_y(img: 'torch.Tensor') ->torch.Tensor:
assert len(img.shape) == 4, img.shape
return img[:, :, :-1, :] - img[:, :, 1:, :]
def inverse_depth_smoothness_loss(idepth: 'torch.Tensor', image: 'torch.Tensor'
) ->torch.Tensor:
"""Criterion that computes image-aware inverse depth smoothness loss.
.. math::
\\text{loss} = \\left | \\partial_x d_{ij} \\right | e^{-\\left \\|
\\partial_x I_{ij} \\right \\|} + \\left |
\\partial_y d_{ij} \\right | e^{-\\left \\| \\partial_y I_{ij} \\right \\|}
Args:
idepth (torch.Tensor): tensor with the inverse depth with shape :math:`(N, 1, H, W)`.
image (torch.Tensor): tensor with the input image with shape :math:`(N, 3, H, W)`.
Return:
torch.Tensor: a scalar with the computed loss.
Examples:
>>> idepth = torch.rand(1, 1, 4, 5)
>>> image = torch.rand(1, 3, 4, 5)
>>> loss = inverse_depth_smoothness_loss(idepth, image)
"""
if not isinstance(idepth, torch.Tensor):
raise TypeError('Input idepth type is not a torch.Tensor. Got {}'.
format(type(idepth)))
if not isinstance(image, torch.Tensor):
raise TypeError('Input image type is not a torch.Tensor. Got {}'.
format(type(image)))
if not len(idepth.shape) == 4:
raise ValueError('Invalid idepth shape, we expect BxCxHxW. Got: {}'
.format(idepth.shape))
if not len(image.shape) == 4:
raise ValueError('Invalid image shape, we expect BxCxHxW. Got: {}'.
format(image.shape))
if not idepth.shape[-2:] == image.shape[-2:]:
raise ValueError(
'idepth and image shapes must be the same. Got: {} and {}'.
format(idepth.shape, image.shape))
if not idepth.device == image.device:
raise ValueError(
'idepth and image must be in the same device. Got: {} and {}'.
format(idepth.device, image.device))
if not idepth.dtype == image.dtype:
raise ValueError(
'idepth and image must be in the same dtype. Got: {} and {}'.
format(idepth.dtype, image.dtype))
idepth_dx: 'torch.Tensor' = _gradient_x(idepth)
idepth_dy: 'torch.Tensor' = _gradient_y(idepth)
image_dx: 'torch.Tensor' = _gradient_x(image)
image_dy: 'torch.Tensor' = _gradient_y(image)
weights_x: 'torch.Tensor' = torch.exp(-torch.mean(torch.abs(image_dx),
dim=1, keepdim=True))
weights_y: 'torch.Tensor' = torch.exp(-torch.mean(torch.abs(image_dy),
dim=1, keepdim=True))
smoothness_x: 'torch.Tensor' = torch.abs(idepth_dx * weights_x)
smoothness_y: 'torch.Tensor' = torch.abs(idepth_dy * weights_y)
return torch.mean(smoothness_x) + torch.mean(smoothness_y)
class InverseDepthSmoothnessLoss(nn.Module):
"""Criterion that computes image-aware inverse depth smoothness loss.
.. math::
\\text{loss} = \\left | \\partial_x d_{ij} \\right | e^{-\\left \\|
\\partial_x I_{ij} \\right \\|} + \\left |
\\partial_y d_{ij} \\right | e^{-\\left \\| \\partial_y I_{ij} \\right \\|}
Shape:
- Inverse Depth: :math:`(N, 1, H, W)`
- Image: :math:`(N, 3, H, W)`
- Output: scalar
Examples:
>>> idepth = torch.rand(1, 1, 4, 5)
>>> image = torch.rand(1, 3, 4, 5)
>>> smooth = InverseDepthSmoothnessLoss()
>>> loss = smooth(idepth, image)
"""
def forward(self, idepth: 'torch.Tensor', image: 'torch.Tensor'
) ->torch.Tensor:
return inverse_depth_smoothness_loss(idepth, image)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
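For intuition, the gradient helpers above are plain forward differences along width and height. A tiny illustrative check (not part of the original file) on a 1x1x2x4 tensor:
# Illustrative only: _gradient_x/_gradient_y are forward differences.
t = torch.arange(8.0).view(1, 1, 2, 4)  # rows [0, 1, 2, 3] and [4, 5, 6, 7]
print(_gradient_x(t))  # shape (1, 1, 2, 3), all -1: t[..., :-1] - t[..., 1:]
print(_gradient_y(t))  # shape (1, 1, 1, 4), all -4: row 0 minus row 1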
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_abs_exp_mean_neg_sub_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = xindex // 3 % 4
x2 = xindex // 12
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 64 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (1 + x0 + 4 * x1 + 64 * x2), xmask)
tmp4 = tl.load(in_ptr0 + (16 + x0 + 4 * x1 + 64 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (17 + x0 + 4 * x1 + 64 * x2), xmask)
tmp9 = tl.load(in_ptr0 + (32 + x0 + 4 * x1 + 64 * x2), xmask)
tmp10 = tl.load(in_ptr0 + (33 + x0 + 4 * x1 + 64 * x2), xmask)
tmp14 = tl.load(in_ptr0 + (48 + x0 + 4 * x1 + 64 * x2), xmask)
tmp15 = tl.load(in_ptr0 + (49 + x0 + 4 * x1 + 64 * x2), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp6 = tmp4 - tmp5
tmp7 = tl_math.abs(tmp6)
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tl_math.abs(tmp11)
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tl_math.abs(tmp16)
tmp18 = tmp13 + tmp17
tmp19 = 4.0
tmp20 = tmp18 / tmp19
tmp21 = -tmp20
tmp22 = tl_math.exp(tmp21)
tl.store(out_ptr0 + x3, tmp22, xmask)
@triton.jit
def triton_poi_fused_abs_exp_mean_neg_sub_1(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = xindex // 12
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 64 * x1), xmask)
tmp4 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (20 + x0 + 64 * x1), xmask)
tmp9 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp10 = tl.load(in_ptr0 + (36 + x0 + 64 * x1), xmask)
tmp14 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp15 = tl.load(in_ptr0 + (52 + x0 + 64 * x1), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp6 = tmp4 - tmp5
tmp7 = tl_math.abs(tmp6)
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tl_math.abs(tmp11)
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tl_math.abs(tmp16)
tmp18 = tmp13 + tmp17
tmp19 = 4.0
tmp20 = tmp18 / tmp19
tmp21 = -tmp20
tmp22 = tl_math.exp(tmp21)
tl.store(out_ptr0 + x2, tmp22, xmask)
@triton.jit
def triton_per_fused_abs_add_exp_mean_mul_neg_sub_2(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r0 = rindex % 3
r5 = rindex // 3
r3 = rindex // 48
r4 = rindex % 12
r6 = rindex // 12
tmp0 = tl.load(in_ptr0 + (r0 + 4 * r5), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (1 + r0 + 4 * r5), rmask, other=0.0)
tmp3 = tl.load(in_ptr1 + (r4 + 12 * r3), rmask, eviction_policy=
'evict_last', other=0.0)
tmp10 = tl.load(in_ptr0 + (r4 + 16 * r6), rmask, other=0.0)
tmp11 = tl.load(in_ptr0 + (4 + r4 + 16 * r6), rmask, other=0.0)
tmp13 = tl.load(in_ptr2 + (r4 + 12 * r3), rmask, eviction_policy=
'evict_last', other=0.0)
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp5 = tl_math.abs(tmp4)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.where(rmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp12 = tmp10 - tmp11
tmp14 = tmp12 * tmp13
tmp15 = tl_math.abs(tmp14)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(rmask, tmp16, 0)
tmp19 = tl.sum(tmp18, 1)[:, None]
tmp20 = 192.0
tmp21 = tmp9 / tmp20
tmp22 = tmp19 / tmp20
tmp23 = tmp21 + tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp23, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 3), (12, 48, 3, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_abs_exp_mean_neg_sub_0[grid(48)](arg1_1, buf0, 48,
XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 1, 3, 4), (12, 48, 4, 1), torch.float32)
triton_poi_fused_abs_exp_mean_neg_sub_1[grid(48)](arg1_1, buf2, 48,
XBLOCK=64, num_warps=1, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf4 = buf1
del buf1
triton_per_fused_abs_add_exp_mean_mul_neg_sub_2[grid(1)](buf4,
arg0_1, buf0, buf2, 1, 192, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del buf0
del buf2
return buf4,
def _gradient_x(img: 'torch.Tensor') ->torch.Tensor:
assert len(img.shape) == 4, img.shape
return img[:, :, :, :-1] - img[:, :, :, 1:]
def _gradient_y(img: 'torch.Tensor') ->torch.Tensor:
assert len(img.shape) == 4, img.shape
return img[:, :, :-1, :] - img[:, :, 1:, :]
def inverse_depth_smoothness_loss(idepth: 'torch.Tensor', image: 'torch.Tensor'
) ->torch.Tensor:
"""Criterion that computes image-aware inverse depth smoothness loss.
.. math::
\\text{loss} = \\left | \\partial_x d_{ij} \\right | e^{-\\left \\|
\\partial_x I_{ij} \\right \\|} + \\left |
\\partial_y d_{ij} \\right | e^{-\\left \\| \\partial_y I_{ij} \\right \\|}
Args:
idepth (torch.Tensor): tensor with the inverse depth with shape :math:`(N, 1, H, W)`.
image (torch.Tensor): tensor with the input image with shape :math:`(N, 3, H, W)`.
Return:
torch.Tensor: a scalar with the computed loss.
Examples:
>>> idepth = torch.rand(1, 1, 4, 5)
>>> image = torch.rand(1, 3, 4, 5)
>>> loss = inverse_depth_smoothness_loss(idepth, image)
"""
if not isinstance(idepth, torch.Tensor):
raise TypeError('Input idepth type is not a torch.Tensor. Got {}'.
format(type(idepth)))
if not isinstance(image, torch.Tensor):
raise TypeError('Input image type is not a torch.Tensor. Got {}'.
format(type(image)))
if not len(idepth.shape) == 4:
raise ValueError('Invalid idepth shape, we expect BxCxHxW. Got: {}'
.format(idepth.shape))
if not len(image.shape) == 4:
raise ValueError('Invalid image shape, we expect BxCxHxW. Got: {}'.
format(image.shape))
if not idepth.shape[-2:] == image.shape[-2:]:
raise ValueError(
'idepth and image shapes must be the same. Got: {} and {}'.
format(idepth.shape, image.shape))
if not idepth.device == image.device:
raise ValueError(
'idepth and image must be in the same device. Got: {} and {}'.
format(idepth.device, image.device))
if not idepth.dtype == image.dtype:
raise ValueError(
'idepth and image must be in the same dtype. Got: {} and {}'.
format(idepth.dtype, image.dtype))
idepth_dx: 'torch.Tensor' = _gradient_x(idepth)
idepth_dy: 'torch.Tensor' = _gradient_y(idepth)
image_dx: 'torch.Tensor' = _gradient_x(image)
image_dy: 'torch.Tensor' = _gradient_y(image)
weights_x: 'torch.Tensor' = torch.exp(-torch.mean(torch.abs(image_dx),
dim=1, keepdim=True))
weights_y: 'torch.Tensor' = torch.exp(-torch.mean(torch.abs(image_dy),
dim=1, keepdim=True))
smoothness_x: 'torch.Tensor' = torch.abs(idepth_dx * weights_x)
smoothness_y: 'torch.Tensor' = torch.abs(idepth_dy * weights_y)
return torch.mean(smoothness_x) + torch.mean(smoothness_y)
class InverseDepthSmoothnessLossNew(nn.Module):
"""Criterion that computes image-aware inverse depth smoothness loss.
.. math::
\\text{loss} = \\left | \\partial_x d_{ij} \\right | e^{-\\left \\|
\\partial_x I_{ij} \\right \\|} + \\left |
\\partial_y d_{ij} \\right | e^{-\\left \\| \\partial_y I_{ij} \\right \\|}
Shape:
- Inverse Depth: :math:`(N, 1, H, W)`
- Image: :math:`(N, 3, H, W)`
- Output: scalar
Examples:
>>> idepth = torch.rand(1, 1, 4, 5)
>>> image = torch.rand(1, 3, 4, 5)
>>> smooth = InverseDepthSmoothnessLoss()
>>> loss = smooth(idepth, image)
"""
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
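A minimal parity sketch between the eager reference and the generated wrapper, assuming a CUDA device is available; the call graph asserts the fixed 4x4x4x4 shape produced by get_inputs.
# Sketch only: compare the eager loss with the Inductor/Triton graph.
idepth = torch.rand(4, 4, 4, 4, device="cuda")
image = torch.rand(4, 4, 4, 4, device="cuda")
ref = InverseDepthSmoothnessLoss()(idepth, image)
opt = InverseDepthSmoothnessLossNew()(idepth, image)
assert torch.allclose(ref, opt, rtol=1e-5, atol=1e-6)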
|
JoanFM/kornia
|
InverseDepthSmoothnessLoss
| false
| 11,551
|
[
"ECL-2.0",
"Apache-2.0"
] | 0
|
808898887cde69074ca3e3df9b24dea9682aad90
|
https://github.com/JoanFM/kornia/tree/808898887cde69074ca3e3df9b24dea9682aad90
|
RgbaToBgr
|
import torch
import torch.nn as nn
def bgr_to_rgb(image: 'torch.Tensor') ->torch.Tensor:
"""Convert a BGR image to RGB.
Args:
image: BGR image to be converted to RGB, of shape :math:`(*,3,H,W)`.
Returns:
RGB version of the image with shape :math:`(*,3,H,W)`.
Example:
>>> input = torch.rand(2, 3, 4, 5)
>>> output = bgr_to_rgb(input) # 2x3x4x5
"""
if not isinstance(image, torch.Tensor):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(
type(image)))
if len(image.shape) < 3 or image.shape[-3] != 3:
raise ValueError('Input size must have a shape of (*, 3, H, W).Got {}'
.format(image.shape))
out: 'torch.Tensor' = image.flip(-3)
return out
def rgb_to_bgr(image: 'torch.Tensor') ->torch.Tensor:
"""Convert a RGB image to BGR.
.. image:: _static/img/rgb_to_bgr.png
Args:
image: RGB image to be converted to BGR, of shape :math:`(*,3,H,W)`.
Returns:
BGR version of the image with shape :math:`(*,3,H,W)`.
Example:
>>> input = torch.rand(2, 3, 4, 5)
>>> output = rgb_to_bgr(input) # 2x3x4x5
"""
if not isinstance(image, torch.Tensor):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(
type(image)))
if len(image.shape) < 3 or image.shape[-3] != 3:
raise ValueError('Input size must have a shape of (*, 3, H, W).Got {}'
.format(image.shape))
return bgr_to_rgb(image)
def rgba_to_rgb(image: 'torch.Tensor') ->torch.Tensor:
"""Convert an image from RGBA to RGB.
Args:
image: RGBA Image to be converted to RGB of shape :math:`(*,4,H,W)`.
Returns:
RGB version of the image with shape :math:`(*,3,H,W)`.
Example:
>>> input = torch.rand(2, 4, 4, 5)
>>> output = rgba_to_rgb(input) # 2x3x4x5
"""
if not isinstance(image, torch.Tensor):
raise TypeError(f'Input type is not a torch.Tensor. Got {type(image)}')
if len(image.shape) < 3 or image.shape[-3] != 4:
raise ValueError(
f'Input size must have a shape of (*, 4, H, W).Got {image.shape}')
r, g, b, a = torch.chunk(image, image.shape[-3], dim=-3)
a_one = torch.tensor(1.0) - a
a_one * r + a * r
a_one * g + a * g
a_one * b + a * b
return torch.cat([r, g, b], dim=-3)
def rgba_to_bgr(image: 'torch.Tensor') ->torch.Tensor:
"""Convert an image from RGBA to BGR.
Args:
image: RGBA Image to be converted to BGR of shape :math:`(*,4,H,W)`.
Returns:
BGR version of the image with shape :math:`(*,3,H,W)`.
Example:
>>> input = torch.rand(2, 4, 4, 5)
>>> output = rgba_to_bgr(input) # 2x3x4x5
"""
if not isinstance(image, torch.Tensor):
raise TypeError(f'Input type is not a torch.Tensor. Got {type(image)}')
if len(image.shape) < 3 or image.shape[-3] != 4:
raise ValueError(
f'Input size must have a shape of (*, 4, H, W).Got {image.shape}')
x_rgb: 'torch.Tensor' = rgba_to_rgb(image)
return rgb_to_bgr(x_rgb)
class RgbaToBgr(nn.Module):
"""Convert an image from RGBA to BGR.
Removes the alpha channel from an RGBA image and converts it to BGR.
Returns:
BGR version of the image.
Shape:
- image: :math:`(*, 4, H, W)`
- output: :math:`(*, 3, H, W)`
Example:
>>> input = torch.rand(2, 4, 4, 5)
>>> rgba = RgbaToBgr()
>>> output = rgba(input) # 2x3x4x5
"""
def forward(self, image: 'torch.Tensor') ->torch.Tensor:
return rgba_to_bgr(image)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_flip_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 3
x0 = xindex % 16
x2 = xindex // 48
x3 = xindex
tmp0 = 2 + -1 * x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tl.full([1], 3, tl.int64)
tmp14 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp15 = tl.where(tmp9, tmp10, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + x3, tmp16, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_flip_0[grid(192)](arg0_1, buf0, 192, XBLOCK=
256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
def bgr_to_rgb(image: 'torch.Tensor') ->torch.Tensor:
"""Convert a BGR image to RGB.
Args:
image: BGR image to be converted to RGB, of shape :math:`(*,3,H,W)`.
Returns:
RGB version of the image with shape :math:`(*,3,H,W)`.
Example:
>>> input = torch.rand(2, 3, 4, 5)
>>> output = bgr_to_rgb(input) # 2x3x4x5
"""
if not isinstance(image, torch.Tensor):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(
type(image)))
if len(image.shape) < 3 or image.shape[-3] != 3:
raise ValueError('Input size must have a shape of (*, 3, H, W).Got {}'
.format(image.shape))
out: 'torch.Tensor' = image.flip(-3)
return out
def rgb_to_bgr(image: 'torch.Tensor') ->torch.Tensor:
"""Convert a RGB image to BGR.
.. image:: _static/img/rgb_to_bgr.png
Args:
image: RGB image to be converted to BGR, of shape :math:`(*,3,H,W)`.
Returns:
BGR version of the image with shape :math:`(*,3,H,W)`.
Example:
>>> input = torch.rand(2, 3, 4, 5)
>>> output = rgb_to_bgr(input) # 2x3x4x5
"""
if not isinstance(image, torch.Tensor):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(
type(image)))
if len(image.shape) < 3 or image.shape[-3] != 3:
raise ValueError('Input size must have a shape of (*, 3, H, W).Got {}'
.format(image.shape))
return bgr_to_rgb(image)
def rgba_to_rgb(image: 'torch.Tensor') ->torch.Tensor:
"""Convert an image from RGBA to RGB.
Args:
image: RGBA Image to be converted to RGB of shape :math:`(*,4,H,W)`.
Returns:
RGB version of the image with shape :math:`(*,3,H,W)`.
Example:
>>> input = torch.rand(2, 4, 4, 5)
>>> output = rgba_to_rgb(input) # 2x3x4x5
"""
if not isinstance(image, torch.Tensor):
raise TypeError(f'Input type is not a torch.Tensor. Got {type(image)}')
if len(image.shape) < 3 or image.shape[-3] != 4:
raise ValueError(
f'Input size must have a shape of (*, 4, H, W).Got {image.shape}')
r, g, b, a = torch.chunk(image, image.shape[-3], dim=-3)
a_one = torch.tensor(1.0) - a
a_one * r + a * r
a_one * g + a * g
a_one * b + a * b
return torch.cat([r, g, b], dim=-3)
def rgba_to_bgr(image: 'torch.Tensor') ->torch.Tensor:
"""Convert an image from RGBA to BGR.
Args:
image: RGBA Image to be converted to BGR of shape :math:`(*,4,H,W)`.
Returns:
BGR version of the image with shape :math:`(*,3,H,W)`.
Example:
>>> input = torch.rand(2, 4, 4, 5)
>>> output = rgba_to_bgr(input) # 2x3x4x5
"""
if not isinstance(image, torch.Tensor):
raise TypeError(f'Input type is not a torch.Tensor. Got {type(image)}')
if len(image.shape) < 3 or image.shape[-3] != 4:
raise ValueError(
f'Input size must have a shape of (*, 4, H, W).Got {image.shape}')
x_rgb: 'torch.Tensor' = rgba_to_rgb(image)
return rgb_to_bgr(x_rgb)
class RgbaToBgrNew(nn.Module):
"""Convert an image from RGBA to BGR.
Removes the alpha channel from an RGBA image and converts it to BGR.
Returns:
BGR version of the image.
Shape:
- image: :math:`(*, 4, H, W)`
- output: :math:`(*, 3, H, W)`
Example:
>>> input = torch.rand(2, 4, 4, 5)
>>> rgba = RgbaToBgr()
>>> output = rgba(input) # 2x3x4x5
"""
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
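A short equivalence sketch, assuming a CUDA device; the call graph asserts the 4x4x4x4 input from get_inputs.
# Sketch only: the fused kernel drops the alpha channel and flips RGB to BGR.
x = torch.rand(4, 4, 4, 4, device="cuda")
assert torch.allclose(RgbaToBgr()(x), RgbaToBgrNew()(x))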
|
JoanFM/kornia
|
RgbaToBgr
| false
| 11,552
|
[
"ECL-2.0",
"Apache-2.0"
] | 0
|
808898887cde69074ca3e3df9b24dea9682aad90
|
https://github.com/JoanFM/kornia/tree/808898887cde69074ca3e3df9b24dea9682aad90
|
Encoder
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Encoder(nn.Module):
def __init__(self, out_dim=64):
super(Encoder, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1)
self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1)
self.conv3 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
self.conv4 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
self.pool = nn.MaxPool2d(2, 2)
self.l1 = nn.Linear(64, 64)
self.l2 = nn.Linear(64, out_dim)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.pool(x)
x = self.conv2(x)
x = F.relu(x)
x = self.pool(x)
x = self.conv3(x)
x = F.relu(x)
x = self.pool(x)
x = self.conv4(x)
x = F.relu(x)
x = self.pool(x)
h = torch.mean(x, dim=[2, 3])
x = self.l1(h)
x = F.relu(x)
x = self.l2(x)
return h, x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_per_fused_max_pool2d_with_indices_mean_7(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 256
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex % 4
r2 = rindex // 4
x0 = xindex
r3 = rindex
tmp0 = tl.load(in_ptr0 + (2 * r1 + 16 * r2 + 64 * x0), xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.load(in_ptr0 + (1 + 2 * r1 + 16 * r2 + 64 * x0), xmask,
eviction_policy='evict_last', other=0.0)
tmp7 = tl.load(in_ptr0 + (8 + 2 * r1 + 16 * r2 + 64 * x0), xmask,
eviction_policy='evict_last', other=0.0)
tmp12 = tl.load(in_ptr0 + (9 + 2 * r1 + 16 * r2 + 64 * x0), xmask,
eviction_policy='evict_last', other=0.0)
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1, 1], 1, tl.int8)
tmp4 = tl.full([1, 1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1, 1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1, 1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = tl.where(xmask, tmp17, 0)
tmp20 = tl.sum(tmp19, 1)[:, None]
tmp21 = 16.0
tmp22 = tmp20 / tmp21
tl.store(out_ptr0 + (r3 + 16 * x0), tmp15, xmask)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp22, xmask)
@triton.jit
def triton_poi_fused_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (16, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (64, 64), (64, 1))
assert_size_stride(primals_11, (64,), (1,))
assert_size_stride(primals_12, (64, 64), (64, 1))
assert_size_stride(primals_13, (64,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(262144)](buf1, primals_2,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1),
torch.float32)
buf3 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(65536)](buf1, buf2,
buf3, 65536, XBLOCK=256, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(131072)](buf5, primals_5,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1),
torch.float32)
buf7 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(32768)](buf5, buf6,
buf7, 32768, XBLOCK=256, num_warps=4, num_stages=1)
buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 64, 16, 16), (16384, 256, 16, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_4[grid(65536)](buf9, primals_7,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.
float32)
buf11 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.int8)
triton_poi_fused_max_pool2d_with_indices_5[grid(16384)](buf9, buf10,
buf11, 16384, XBLOCK=256, num_warps=4, num_stages=1)
buf12 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 64, 8, 8), (4096, 64, 8, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_relu_6[grid(16384)](buf13, primals_9,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf14 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.int8)
buf15 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
buf16 = buf15
del buf15
triton_per_fused_max_pool2d_with_indices_mean_7[grid(256)](buf16,
buf13, buf14, 256, 16, XBLOCK=8, num_warps=2, num_stages=1)
buf17 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
extern_kernels.mm(buf16, reinterpret_tensor(primals_10, (64, 64), (
1, 64), 0), out=buf17)
buf18 = buf17
del buf17
triton_poi_fused_relu_8[grid(256)](buf18, primals_11, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_11
buf19 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
extern_kernels.addmm(primals_13, buf18, reinterpret_tensor(
primals_12, (64, 64), (1, 64), 0), alpha=1, beta=1, out=buf19)
del primals_13
return (buf16, buf19, primals_1, primals_3, primals_4, primals_6,
primals_8, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, buf11,
buf13, buf14, buf16, buf18, primals_12, primals_10)
class EncoderNew(nn.Module):
def __init__(self, out_dim=64):
super(EncoderNew, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1)
self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1)
self.conv3 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
self.conv4 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
self.pool = nn.MaxPool2d(2, 2)
self.l1 = nn.Linear(64, 64)
self.l2 = nn.Linear(64, out_dim)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.l1.weight
primals_11 = self.l1.bias
primals_12 = self.l2.weight
primals_13 = self.l2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0], output[1]
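Because both modules draw their own random initial weights, a parity sketch has to copy the state dict first; this assumes a CUDA device and the 4x3x64x64 input from get_inputs.
# Sketch only: align weights, then compare eager vs. generated outputs (h, x).
enc = Encoder().cuda().eval()
enc_new = EncoderNew().cuda().eval()
enc_new.load_state_dict(enc.state_dict())
x = torch.rand(4, 3, 64, 64, device="cuda")
h_ref, z_ref = enc(x)
h_new, z_new = enc_new(x)
assert torch.allclose(h_ref, h_new, atol=1e-5)
assert torch.allclose(z_ref, z_new, atol=1e-5)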
|
JanSoltysik/SimCLR
|
Encoder
| false
| 11,553
|
[
"MIT"
] | 0
|
34ea6d17a630382b65a00aa445d82876754ee679
|
https://github.com/JanSoltysik/SimCLR/tree/34ea6d17a630382b65a00aa445d82876754ee679
|
InvDepth
|
import torch
import torch.nn as nn
class InvDepth(nn.Module):
def __init__(self, height, width, min_depth=0.5, max_depth=25.0):
super(InvDepth, self).__init__()
self._min_range = 1.0 / max_depth
self._max_range = 1.0 / min_depth
self.w = nn.Parameter(self._init_weights(height, width))
def _init_weights(self, height, width):
r1 = self._min_range
r2 = self._min_range + (self._max_range - self._min_range) * 0.1
w_init = (r1 - r2) * torch.rand(1, 1, height, width) + r2
return w_init
def forward(self):
return self.w.clamp(min=self._min_range, max=self._max_range)
def get_inputs():
return []
def get_init_inputs():
return [[], {'height': 4, 'width': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_ge_le_logical_and_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.04
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 2.0
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tmp5 = tmp0 >= tmp1
tmp6 = tmp0 <= tmp3
tmp7 = tmp5 & tmp6
tl.store(out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr1 + x0, tmp7, xmask)
def call(args):
primals_1, = args
args.clear()
assert_size_stride(primals_1, (1, 1, 4, 4), (16, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 1, 4, 4), (16, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((1, 1, 4, 4), (16, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_clamp_ge_le_logical_and_0[grid(16)](primals_1,
buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_1
return buf0, buf1
class InvDepthNew(nn.Module):
def __init__(self, height, width, min_depth=0.5, max_depth=25.0):
super(InvDepthNew, self).__init__()
self._min_range = 1.0 / max_depth
self._max_range = 1.0 / min_depth
self.w = nn.Parameter(self._init_weights(height, width))
def _init_weights(self, height, width):
r1 = self._min_range
r2 = self._min_range + (self._max_range - self._min_range) * 0.1
w_init = (r1 - r2) * torch.rand(1, 1, height, width) + r2
return w_init
def forward(self):
primals_1 = self.w
output = call([primals_1])
return output[0]
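InvDepthNew takes no forward arguments and clamps its own parameter, so a parity sketch only needs to copy w across; this assumes a CUDA device.
# Sketch only: share the parameter, then compare the clamped outputs.
ref = InvDepth(4, 4).cuda()
opt = InvDepthNew(4, 4).cuda()
with torch.no_grad():
    opt.w.copy_(ref.w)
assert torch.allclose(ref(), opt())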
|
JoanFM/kornia
|
InvDepth
| false
| 11,554
|
[
"ECL-2.0",
"Apache-2.0"
] | 0
|
808898887cde69074ca3e3df9b24dea9682aad90
|
https://github.com/JoanFM/kornia/tree/808898887cde69074ca3e3df9b24dea9682aad90
|
PoseNetFeat
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import torch.nn.functional as F
class PoseNetFeat(nn.Module):
def __init__(self, num_points):
super(PoseNetFeat, self).__init__()
self.conv1 = torch.nn.Conv1d(3, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.e_conv1 = torch.nn.Conv1d(32, 64, 1)
self.e_conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv5 = torch.nn.Conv1d(256, 256, 1)
self.all_conv1 = torch.nn.Conv1d(640, 320, 1)
self.all_conv2 = torch.nn.Conv1d(320, 160, 1)
self.num_points = num_points
def forward(self, x, emb):
x = F.relu(self.conv1(x))
emb = F.relu(self.e_conv1(emb))
pointfeat_1 = torch.cat((x, emb), dim=1)
x = F.relu(self.conv2(x))
emb = F.relu(self.e_conv2(emb))
pointfeat_2 = torch.cat((x, emb), dim=1)
x = F.relu(self.conv5(pointfeat_2))
x = torch.cat([pointfeat_1, pointfeat_2, x], dim=1).contiguous()
x = F.leaky_relu(self.all_conv1(x))
x = self.all_conv2(x)
return x
def get_inputs():
return [torch.rand([4, 3, 64]), torch.rand([4, 32, 64])]
def get_init_inputs():
return [[], {'num_points': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64 % 256
x0 = xindex % 64
x2 = xindex // 16384
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 128, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x1 + 8192 * x2), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1], 256, tl.int64)
tmp15 = tl.load(in_ptr2 + (x0 + 64 * (-128 + x1) + 8192 * x2), tmp12,
other=0.0)
tmp16 = tl.load(in_ptr3 + (-128 + x1), tmp12, eviction_policy=
'evict_last', other=0.0)
tmp17 = tmp15 + tmp16
tmp18 = triton_helpers.maximum(tmp8, tmp17)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp12, tmp18, tmp19)
tmp21 = tl.where(tmp4, tmp11, tmp20)
tl.store(out_ptr0 + x3, tmp21, None)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64 % 640
x0 = xindex % 64
x2 = xindex // 40960
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 128, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 64, tl.int64)
tmp6 = tmp0 < tmp5
tmp7 = tmp6 & tmp4
tmp8 = tl.load(in_ptr0 + (x0 + 64 * x1 + 4096 * x2), tmp7, other=0.0)
tmp9 = tmp0 >= tmp5
tmp10 = tmp9 & tmp4
tmp11 = tl.load(in_ptr1 + (x0 + 64 * (-64 + x1) + 4096 * x2), tmp10,
other=0.0)
tmp12 = tl.where(tmp6, tmp8, tmp11)
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp4, tmp12, tmp13)
tmp15 = tmp0 >= tmp3
tmp16 = tl.full([1], 384, tl.int64)
tmp17 = tmp0 < tmp16
tmp18 = tmp15 & tmp17
tmp19 = tl.load(in_ptr2 + (x0 + 64 * (-128 + x1) + 16384 * x2), tmp18,
other=0.0)
tmp20 = tmp0 >= tmp16
tl.full([1], 640, tl.int64)
tmp23 = tl.load(in_ptr3 + (x0 + 64 * (-384 + x1) + 16384 * x2), tmp20,
other=0.0)
tmp24 = tl.load(in_ptr4 + (-384 + x1), tmp20, eviction_policy=
'evict_last', other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tl.full([1], 0, tl.int32)
tmp27 = triton_helpers.maximum(tmp26, tmp25)
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp20, tmp27, tmp28)
tmp30 = tl.where(tmp18, tmp19, tmp29)
tmp31 = tl.where(tmp4, tmp14, tmp30)
tl.store(out_ptr0 + x3, tmp31, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 320
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 160
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_5(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_6(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16) = args
args.clear()
assert_size_stride(primals_1, (64, 3, 1), (3, 1, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 3, 64), (192, 64, 1))
assert_size_stride(primals_4, (64, 32, 1), (32, 1, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (4, 32, 64), (2048, 64, 1))
assert_size_stride(primals_7, (128, 64, 1), (64, 1, 1))
assert_size_stride(primals_8, (128,), (1,))
assert_size_stride(primals_9, (128, 64, 1), (64, 1, 1))
assert_size_stride(primals_10, (128,), (1,))
assert_size_stride(primals_11, (256, 256, 1), (256, 1, 1))
assert_size_stride(primals_12, (256,), (1,))
assert_size_stride(primals_13, (320, 640, 1), (640, 1, 1))
assert_size_stride(primals_14, (320,), (1,))
assert_size_stride(primals_15, (160, 320, 1), (320, 1, 1))
assert_size_stride(primals_16, (160,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64), (4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(16384)](buf1, primals_2,
16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(primals_6, primals_4, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 64), (4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_0[grid(16384)](buf3, primals_5,
16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf1, primals_7, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf4, (4, 128, 64), (8192, 64, 1))
buf5 = extern_kernels.convolution(buf3, primals_9, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf5, (4, 128, 64), (8192, 64, 1))
buf6 = empty_strided_cuda((4, 256, 64), (16384, 64, 1), torch.float32)
triton_poi_fused_cat_1[grid(65536)](buf4, primals_8, buf5,
primals_10, buf6, 65536, XBLOCK=512, num_warps=4, num_stages=1)
buf7 = extern_kernels.convolution(buf6, primals_11, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf7, (4, 256, 64), (16384, 64, 1))
buf8 = empty_strided_cuda((4, 640, 64), (40960, 64, 1), torch.float32)
triton_poi_fused_cat_2[grid(163840)](buf1, buf3, buf6, buf7,
primals_12, buf8, 163840, XBLOCK=512, num_warps=8, num_stages=1)
buf9 = extern_kernels.convolution(buf8, primals_13, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf9, (4, 320, 64), (20480, 64, 1))
buf10 = empty_strided_cuda((4, 320, 64), (20480, 64, 1), torch.bool)
buf11 = empty_strided_cuda((4, 320, 64), (20480, 64, 1), torch.float32)
triton_poi_fused_convolution_leaky_relu_3[grid(81920)](buf9,
primals_14, buf10, buf11, 81920, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf9
del primals_14
buf12 = extern_kernels.convolution(buf11, primals_15, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf12, (4, 160, 64), (10240, 64, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_4[grid(40960)](buf13, primals_16,
40960, XBLOCK=512, num_warps=4, num_stages=1)
del primals_16
buf14 = empty_strided_cuda((4, 256, 64), (16384, 64, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_5[grid(65536)](
buf7, primals_12, buf14, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del buf7
del primals_12
buf15 = empty_strided_cuda((4, 128, 64), (8192, 64, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_6[grid(32768)](
buf5, primals_10, buf15, 32768, XBLOCK=256, num_warps=4,
num_stages=1)
del buf5
del primals_10
buf16 = empty_strided_cuda((4, 128, 64), (8192, 64, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_6[grid(32768)](
buf4, primals_8, buf16, 32768, XBLOCK=256, num_warps=4,
num_stages=1)
del buf4
del primals_8
return (buf13, primals_1, primals_3, primals_4, primals_6, primals_7,
primals_9, primals_11, primals_13, primals_15, buf1, buf3, buf6,
buf8, buf10, buf11, buf14, buf15, buf16)
class PoseNetFeatNew(nn.Module):
def __init__(self, num_points):
super(PoseNetFeatNew, self).__init__()
self.conv1 = torch.nn.Conv1d(3, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.e_conv1 = torch.nn.Conv1d(32, 64, 1)
self.e_conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv5 = torch.nn.Conv1d(256, 256, 1)
self.all_conv1 = torch.nn.Conv1d(640, 320, 1)
self.all_conv2 = torch.nn.Conv1d(320, 160, 1)
self.num_points = num_points
def forward(self, input_0, input_1):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_7 = self.conv2.weight
primals_8 = self.conv2.bias
primals_4 = self.e_conv1.weight
primals_5 = self.e_conv1.bias
primals_9 = self.e_conv2.weight
primals_10 = self.e_conv2.bias
primals_11 = self.conv5.weight
primals_12 = self.conv5.bias
primals_13 = self.all_conv1.weight
primals_14 = self.all_conv1.bias
primals_15 = self.all_conv2.weight
primals_16 = self.all_conv2.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16])
return output[0]
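As with the encoder above, the two modules initialise independently, so a sketch copies weights before comparing; this assumes a CUDA device and the inputs from get_inputs.
# Sketch only: compare eager PoseNetFeat with the generated graph.
ref = PoseNetFeat(num_points=4).cuda().eval()
opt = PoseNetFeatNew(num_points=4).cuda().eval()
opt.load_state_dict(ref.state_dict())
x = torch.rand(4, 3, 64, device="cuda")
emb = torch.rand(4, 32, 64, device="cuda")
assert torch.allclose(ref(x, emb), opt(x, emb), atol=1e-5)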
|
JiazeWang/6-PACK
|
PoseNetFeat
| false
| 11,555
|
[
"MIT"
] | 0
|
bce910213cfbf89b4ed7b59ff6c70a59a7c19b99
|
https://github.com/JiazeWang/6-PACK/tree/bce910213cfbf89b4ed7b59ff6c70a59a7c19b99
|
Hflip
|
import torch
import torch.nn as nn
def hflip(input: 'torch.Tensor') ->torch.Tensor:
return torch.flip(input, [-1])
class Hflip(nn.Module):
"""Horizontally flip a tensor image or a batch of tensor images. Input must
be a tensor of shape (C, H, W) or a batch of tensors :math:`(*, C, H, W)`.
Args:
input (torch.Tensor): input tensor
Returns:
torch.Tensor: The horizontally flipped image tensor
Examples:
>>> hflip = Hflip()
>>> input = torch.tensor([[[
... [0., 0., 0.],
... [0., 0., 0.],
... [0., 1., 1.]
... ]]])
>>> hflip(input)
tensor([[[[0., 0., 0.],
[0., 0., 0.],
[1., 1., 0.]]]])
"""
def forward(self, input: 'torch.Tensor') ->torch.Tensor:
return hflip(input)
def __repr__(self):
return self.__class__.__name__
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_flip_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (3 + -1 * x0 + 4 * x1), xmask, eviction_policy
='evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_flip_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
def hflip(input: 'torch.Tensor') ->torch.Tensor:
return torch.flip(input, [-1])
class HflipNew(nn.Module):
"""Horizontally flip a tensor image or a batch of tensor images. Input must
be a tensor of shape (C, H, W) or a batch of tensors :math:`(*, C, H, W)`.
Args:
input (torch.Tensor): input tensor
Returns:
torch.Tensor: The horizontally flipped image tensor
Examples:
>>> hflip = Hflip()
>>> input = torch.tensor([[[
... [0., 0., 0.],
... [0., 0., 0.],
... [0., 1., 1.]
... ]]])
>>> hflip(input)
tensor([[[[0., 0., 0.],
[0., 0., 0.],
[1., 1., 0.]]]])
"""
def __repr__(self):
return self.__class__.__name__
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
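A one-line check of the flip kernel against torch.flip, assuming a CUDA device and the 4x4x4x4 input from get_inputs.
# Sketch only: the fused kernel reverses the last (width) dimension exactly.
x = torch.rand(4, 4, 4, 4, device="cuda")
assert torch.equal(HflipNew()(x), torch.flip(x, [-1]))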
|
JoanFM/kornia
|
Hflip
| false
| 11,556
|
[
"ECL-2.0",
"Apache-2.0"
] | 0
|
808898887cde69074ca3e3df9b24dea9682aad90
|
https://github.com/JoanFM/kornia/tree/808898887cde69074ca3e3df9b24dea9682aad90
|
BinaryFocalLossWithLogits
|
import torch
import torch.nn as nn
def binary_focal_loss_with_logits(input: 'torch.Tensor', target:
'torch.Tensor', alpha: 'float'=0.25, gamma: 'float'=2.0, reduction:
'str'='none', eps: 'float'=1e-08) ->torch.Tensor:
"""Function that computes Binary Focal loss.
.. math::
\\text{FL}(p_t) = -\\alpha_t (1 - p_t)^{\\gamma} \\, \\text{log}(p_t)
where:
- :math:`p_t` is the model's estimated probability for each class.
Args:
input (torch.Tensor): input data tensor with shape :math:`(N, 1, *)`.
target (torch.Tensor): the target tensor with shape :math:`(N, 1, *)`.
alpha (float): Weighting factor for the rare class :math:`\\alpha \\in [0, 1]`. Default: 0.25.
gamma (float): Focusing parameter :math:`\\gamma >= 0`. Default: 2.0.
reduction (str, optional): Specifies the reduction to apply to the output:
'none' | 'mean' | 'sum'. Default: 'none'.
eps (float): for numerical stability when dividing. Default: 1e-8.
Returns:
torch.tensor: the computed loss.
Examples:
>>> num_classes = 1
>>> kwargs = {"alpha": 0.25, "gamma": 2.0, "reduction": 'mean'}
>>> logits = torch.tensor([[[[6.325]]],[[[5.26]]],[[[87.49]]]])
>>> labels = torch.tensor([[[1.]],[[1.]],[[0.]]])
>>> binary_focal_loss_with_logits(logits, labels, **kwargs)
tensor(4.6052)
"""
if not isinstance(input, torch.Tensor):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(
type(input)))
if not len(input.shape) >= 2:
raise ValueError('Invalid input shape, we expect BxCx*. Got: {}'.
format(input.shape))
if input.size(0) != target.size(0):
raise ValueError(
'Expected input batch_size ({}) to match target batch_size ({}).'
.format(input.size(0), target.size(0)))
probs = torch.sigmoid(input)
target = target.unsqueeze(dim=1)
loss_tmp = -alpha * torch.pow(1.0 - probs + eps, gamma
) * target * torch.log(probs + eps) - (1 - alpha) * torch.pow(probs +
eps, gamma) * (1.0 - target) * torch.log(1.0 - probs + eps)
loss_tmp = loss_tmp.squeeze(dim=1)
if reduction == 'none':
loss = loss_tmp
elif reduction == 'mean':
loss = torch.mean(loss_tmp)
elif reduction == 'sum':
loss = torch.sum(loss_tmp)
else:
raise NotImplementedError('Invalid reduction mode: {}'.format(
reduction))
return loss
class BinaryFocalLossWithLogits(nn.Module):
"""Criterion that computes Focal loss.
According to :cite:`lin2017focal`, the Focal loss is computed as follows:
.. math::
\\text{FL}(p_t) = -\\alpha_t (1 - p_t)^{\\gamma} \\, \\text{log}(p_t)
where:
- :math:`p_t` is the model's estimated probability for each class.
Args:
alpha (float): Weighting factor for the rare class :math:`\\alpha \\in [0, 1]`.
gamma (float): Focusing parameter :math:`\\gamma >= 0`.
reduction (str, optional): Specifies the reduction to apply to the
output: ‘none’ | ‘mean’ | ‘sum’. ‘none’: no reduction will be applied,
‘mean’: the sum of the output will be divided by the number of elements
in the output, ‘sum’: the output will be summed. Default: ‘none’.
Shape:
- Input: :math:`(N, 1, *)`.
- Target: :math:`(N, 1, *)`.
Examples:
>>> N = 1 # num_classes
>>> kwargs = {"alpha": 0.25, "gamma": 2.0, "reduction": 'mean'}
>>> loss = BinaryFocalLossWithLogits(**kwargs)
>>> input = torch.randn(1, N, 3, 5, requires_grad=True)
>>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(N)
>>> output = loss(input, target)
>>> output.backward()
"""
def __init__(self, alpha: 'float', gamma: 'float'=2.0, reduction: 'str'
='none') ->None:
super(BinaryFocalLossWithLogits, self).__init__()
self.alpha: 'float' = alpha
self.gamma: 'float' = gamma
self.reduction: 'str' = reduction
self.eps: 'float' = 1e-08
def forward(self, input: 'torch.Tensor', target: 'torch.Tensor'
) ->torch.Tensor:
return binary_focal_loss_with_logits(input, target, self.alpha,
self.gamma, self.reduction, self.eps)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'alpha': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_log_mul_pow_rsub_sigmoid_squeeze_sub_0(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 256
x0 = xindex % 64
x2 = xindex // 256
x4 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp4 = 1e-08
tmp5 = tmp3 + tmp4
tmp6 = tmp5 * tmp5
tmp7 = -4.0
tmp8 = tmp6 * tmp7
tmp10 = tmp8 * tmp9
tmp11 = tmp1 + tmp4
tmp12 = tl_math.log(tmp11)
tmp13 = tmp10 * tmp12
tmp14 = tmp11 * tmp11
tmp15 = -3.0
tmp16 = tmp14 * tmp15
tmp17 = tmp2 - tmp9
tmp18 = tmp16 * tmp17
tmp19 = tl_math.log(tmp5)
tmp20 = tmp18 * tmp19
tmp21 = tmp13 - tmp20
tl.store(out_ptr0 + x4, tmp21, xmask)
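# Note (added): the fused kernel above hard-codes the values used when the module is
# constructed with alpha=4 (as in get_init_inputs) and the default gamma=2.0:
# tmp7 = -4.0 is -alpha, tmp15 = -3.0 is (1 - alpha), gamma=2 is realised as the
# explicit squarings tmp5*tmp5 and tmp11*tmp11, and the final tmp13 - tmp20 supplies
# the leading minus sign of the second focal term.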
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_add_log_mul_pow_rsub_sigmoid_squeeze_sub_0[grid(1024)
](arg0_1, arg1_1, buf0, 1024, XBLOCK=256, num_warps=4, num_stages=1
)
del arg0_1
del arg1_1
return buf0,
def binary_focal_loss_with_logits(input: 'torch.Tensor', target:
'torch.Tensor', alpha: 'float'=0.25, gamma: 'float'=2.0, reduction:
'str'='none', eps: 'float'=1e-08) ->torch.Tensor:
"""Function that computes Binary Focal loss.
.. math::
\\text{FL}(p_t) = -\\alpha_t (1 - p_t)^{\\gamma} \\, \\text{log}(p_t)
where:
- :math:`p_t` is the model's estimated probability for each class.
Args:
input (torch.Tensor): input data tensor with shape :math:`(N, 1, *)`.
target (torch.Tensor): the target tensor with shape :math:`(N, 1, *)`.
alpha (float): Weighting factor for the rare class :math:`\\alpha \\in [0, 1]`. Default: 0.25.
gamma (float): Focusing parameter :math:`\\gamma >= 0`. Default: 2.0.
reduction (str, optional): Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. Default: 'none'.
eps (float): small value added for numerical stability. Default: 1e-8.
Returns:
torch.tensor: the computed loss.
Examples:
>>> num_classes = 1
>>> kwargs = {"alpha": 0.25, "gamma": 2.0, "reduction": 'mean'}
>>> logits = torch.tensor([[[[6.325]]],[[[5.26]]],[[[87.49]]]])
>>> labels = torch.tensor([[[1.]],[[1.]],[[0.]]])
>>> binary_focal_loss_with_logits(logits, labels, **kwargs)
tensor(4.6052)
"""
if not isinstance(input, torch.Tensor):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(
type(input)))
if not len(input.shape) >= 2:
raise ValueError('Invalid input shape, we expect BxCx*. Got: {}'.
format(input.shape))
if input.size(0) != target.size(0):
raise ValueError(
'Expected input batch_size ({}) to match target batch_size ({}).'
.format(input.size(0), target.size(0)))
probs = torch.sigmoid(input)
target = target.unsqueeze(dim=1)
loss_tmp = -alpha * torch.pow(1.0 - probs + eps, gamma
) * target * torch.log(probs + eps) - (1 - alpha) * torch.pow(probs +
eps, gamma) * (1.0 - target) * torch.log(1.0 - probs + eps)
loss_tmp = loss_tmp.squeeze(dim=1)
if reduction == 'none':
loss = loss_tmp
elif reduction == 'mean':
loss = torch.mean(loss_tmp)
elif reduction == 'sum':
loss = torch.sum(loss_tmp)
else:
raise NotImplementedError('Invalid reduction mode: {}'.format(
reduction))
return loss
class BinaryFocalLossWithLogitsNew(nn.Module):
"""Criterion that computes Focal loss.
According to :cite:`lin2017focal`, the Focal loss is computed as follows:
.. math::
\\text{FL}(p_t) = -\\alpha_t (1 - p_t)^{\\gamma} \\, \\text{log}(p_t)
where:
- :math:`p_t` is the model's estimated probability for each class.
Args:
alpha (float): Weighting factor for the rare class :math:`\\alpha \\in [0, 1]`.
gamma (float): Focusing parameter :math:`\\gamma >= 0`.
reduction (str, optional): Specifies the reduction to apply to the
output: ‘none’ | ‘mean’ | ‘sum’. ‘none’: no reduction will be applied,
‘mean’: the sum of the output will be divided by the number of elements
in the output, ‘sum’: the output will be summed. Default: ‘none’.
Shape:
- Input: :math:`(N, 1, *)`.
- Target: :math:`(N, 1, *)`.
Examples:
>>> N = 1 # num_classes
>>> kwargs = {"alpha": 0.25, "gamma": 2.0, "reduction": 'mean'}
>>> loss = BinaryFocalLossWithLogits(**kwargs)
>>> input = torch.randn(1, N, 3, 5, requires_grad=True)
>>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(N)
>>> output = loss(input, target)
>>> output.backward()
"""
def __init__(self, alpha: 'float', gamma: 'float'=2.0, reduction: 'str'
='none') ->None:
super(BinaryFocalLossWithLogitsNew, self).__init__()
self.alpha: 'float' = alpha
self.gamma: 'float' = gamma
self.reduction: 'str' = reduction
self.eps: 'float' = 1e-08
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
JoanFM/kornia
|
BinaryFocalLossWithLogits
| false
| 11,557
|
[
"ECL-2.0",
"Apache-2.0"
] | 0
|
808898887cde69074ca3e3df9b24dea9682aad90
|
https://github.com/JoanFM/kornia/tree/808898887cde69074ca3e3df9b24dea9682aad90
|
TotalVariation
|
import torch
import torch.nn as nn
def total_variation(img: 'torch.Tensor') ->torch.Tensor:
"""Function that computes Total Variation according to [1].
Args:
img (torch.Tensor): the input image with shape :math:`(N, C, H, W)` or :math:`(C, H, W)`.
Return:
torch.Tensor: a scalar with the computed loss for :math:`(C, H, W)` input, or a tensor of shape :math:`(N,)` for batched input.
Examples:
>>> total_variation(torch.ones(3, 4, 4))
tensor(0.)
Reference:
[1] https://en.wikipedia.org/wiki/Total_variation
"""
if not isinstance(img, torch.Tensor):
raise TypeError(f'Input type is not a torch.Tensor. Got {type(img)}')
if len(img.shape) < 3 or len(img.shape) > 4:
raise ValueError(
f'Expected input tensor to be of ndim 3 or 4, but got {len(img.shape)}.'
)
pixel_dif1 = img[..., 1:, :] - img[..., :-1, :]
pixel_dif2 = img[..., :, 1:] - img[..., :, :-1]
reduce_axes = -3, -2, -1
res1 = pixel_dif1.abs().sum(dim=reduce_axes)
res2 = pixel_dif2.abs().sum(dim=reduce_axes)
return res1 + res2
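# Added hedged example (illustrative only; the helper name is not part of the upstream
# API): for a single-channel 2x2 image the vertical differences contribute
# |2-0| + |3-1| = 4 and the horizontal ones |1-0| + |3-2| = 2, so the total variation is 6.
def _example_total_variation():
    img = torch.tensor([[[0.0, 1.0], [2.0, 3.0]]])  # (C=1, H=2, W=2)
    return torch.isclose(total_variation(img), torch.tensor(6.0))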
class TotalVariation(nn.Module):
"""Computes the Total Variation according to [1].
Shape:
- Input: :math:`(N, C, H, W)` or :math:`(C, H, W)`.
- Output: :math:`(N,)` or scalar.
Examples:
>>> tv = TotalVariation()
>>> output = tv(torch.ones((2, 3, 4, 4), requires_grad=True))
>>> output.data
tensor([0., 0.])
>>> output.sum().backward() # grad can be implicitly created only for scalar outputs
Reference:
[1] https://en.wikipedia.org/wiki/Total_variation
"""
def forward(self, img) ->torch.Tensor:
return total_variation(img)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_add_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 48
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex % 12
r2 = rindex // 12
x0 = xindex
r3 = rindex % 3
r4 = rindex // 3
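    # (added) r1 (offset within a channel's (H-1)*W grid) and r2 (channel) address the
    # vertical differences, while r3 (column within W-1) and r4 (row-and-channel index)
    # address the horizontal ones; both 48-element reductions run in this single pass
    # and are summed per batch element x0.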
tmp0 = tl.load(in_ptr0 + (4 + r1 + 16 * r2 + 64 * x0), rmask & xmask,
other=0.0)
tmp1 = tl.load(in_ptr0 + (r1 + 16 * r2 + 64 * x0), rmask & xmask, other=0.0
)
tmp8 = tl.load(in_ptr0 + (1 + r3 + 4 * r4 + 64 * x0), rmask & xmask,
other=0.0)
tmp9 = tl.load(in_ptr0 + (r3 + 4 * r4 + 64 * x0), rmask & xmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask & xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp10 = tmp8 - tmp9
tmp11 = tl_math.abs(tmp10)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.where(rmask & xmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = tmp7 + tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp16, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_add_sub_sum_0[grid(4)](buf2, arg0_1, 4, 48,
XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf2,
def total_variation(img: 'torch.Tensor') ->torch.Tensor:
"""Function that computes Total Variation according to [1].
Args:
img (torch.Tensor): the input image with shape :math:`(N, C, H, W)` or :math:`(C, H, W)`.
Return:
torch.Tensor: a scalar with the computed loss for :math:`(C, H, W)` input, or a tensor of shape :math:`(N,)` for batched input.
Examples:
>>> total_variation(torch.ones(3, 4, 4))
tensor(0.)
Reference:
[1] https://en.wikipedia.org/wiki/Total_variation
"""
if not isinstance(img, torch.Tensor):
raise TypeError(f'Input type is not a torch.Tensor. Got {type(img)}')
if len(img.shape) < 3 or len(img.shape) > 4:
raise ValueError(
f'Expected input tensor to be of ndim 3 or 4, but got {len(img.shape)}.'
)
pixel_dif1 = img[..., 1:, :] - img[..., :-1, :]
pixel_dif2 = img[..., :, 1:] - img[..., :, :-1]
reduce_axes = -3, -2, -1
res1 = pixel_dif1.abs().sum(dim=reduce_axes)
res2 = pixel_dif2.abs().sum(dim=reduce_axes)
return res1 + res2
class TotalVariationNew(nn.Module):
"""Computes the Total Variation according to [1].
Shape:
- Input: :math:`(N, C, H, W)` or :math:`(C, H, W)`.
- Output: :math:`(N,)` or scalar.
Examples:
>>> tv = TotalVariation()
>>> output = tv(torch.ones((2, 3, 4, 4), requires_grad=True))
>>> output.data
tensor([0., 0.])
>>> output.sum().backward() # grad can be implicitly created only for scalar outputs
Reference:
[1] https://en.wikipedia.org/wiki/Total_variation
"""
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
JoanFM/kornia
|
TotalVariation
| false
| 11,558
|
[
"ECL-2.0",
"Apache-2.0"
] | 0
|
808898887cde69074ca3e3df9b24dea9682aad90
|
https://github.com/JoanFM/kornia/tree/808898887cde69074ca3e3df9b24dea9682aad90
|
Vflip
|
import torch
import torch.nn as nn
def vflip(input: 'torch.Tensor') ->torch.Tensor:
return torch.flip(input, [-2])
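# Added hedged example (illustrative only; the helper name is not part of the upstream
# API): flipping along the height axis is the same as reversing the row order with
# advanced indexing.
def _example_vflip():
    x = torch.arange(6.0).view(1, 2, 3)  # (C=1, H=2, W=3)
    return torch.equal(vflip(x), x[:, [1, 0], :])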
class Vflip(nn.Module):
"""Vertically flip a tensor image or a batch of tensor images. Input must
be a tensor of shape (C, H, W) or a batch of tensors :math:`(*, C, H, W)`.
Args:
input (torch.Tensor): input tensor
Returns:
torch.Tensor: The vertically flipped image tensor
Examples:
>>> vflip = Vflip()
>>> input = torch.tensor([[[
... [0., 0., 0.],
... [0., 0., 0.],
... [0., 1., 1.]
... ]]])
>>> vflip(input)
tensor([[[[0., 1., 1.],
[0., 0., 0.],
[0., 0., 0.]]]])
"""
def forward(self, input: 'torch.Tensor') ->torch.Tensor:
return vflip(input)
def __repr__(self):
return self.__class__.__name__
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_flip_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
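    # (added) source offset 12 + x0 - 4*x1 + 16*x2 == x0 + 4*(3 - x1) + 16*x2, i.e.
    # column x0 of row (3 - x1) in plane x2: the height axis is read in reverse order.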
tmp0 = tl.load(in_ptr0 + (12 + x0 + -4 * x1 + 16 * x2), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_flip_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
def vflip(input: 'torch.Tensor') ->torch.Tensor:
return torch.flip(input, [-2])
class VflipNew(nn.Module):
"""Vertically flip a tensor image or a batch of tensor images. Input must
be a tensor of shape (C, H, W) or a batch of tensors :math:`(*, C, H, W)`.
Args:
input (torch.Tensor): input tensor
Returns:
torch.Tensor: The vertically flipped image tensor
Examples:
>>> vflip = Vflip()
>>> input = torch.tensor([[[
... [0., 0., 0.],
... [0., 0., 0.],
... [0., 1., 1.]
... ]]])
>>> vflip(input)
tensor([[[[0., 1., 1.],
[0., 0., 0.],
[0., 0., 0.]]]])
"""
def __repr__(self):
return self.__class__.__name__
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
JoanFM/kornia
|
Vflip
| false
| 11,559
|
[
"ECL-2.0",
"Apache-2.0"
] | 0
|
808898887cde69074ca3e3df9b24dea9682aad90
|
https://github.com/JoanFM/kornia/tree/808898887cde69074ca3e3df9b24dea9682aad90
|
LinearSum
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class LinearSum(nn.Module):
def __init__(self, input_dims, output_dim, mm_dim=1200, activ_input=
'relu', activ_output='relu', normalize=False, dropout_input=0.0,
dropout_pre_lin=0.0, dropout_output=0.0):
super(LinearSum, self).__init__()
self.input_dims = input_dims
self.output_dim = output_dim
self.mm_dim = mm_dim
self.activ_input = activ_input
self.activ_output = activ_output
self.normalize = normalize
self.dropout_input = dropout_input
self.dropout_pre_lin = dropout_pre_lin
self.dropout_output = dropout_output
self.linear0 = nn.Linear(input_dims[0], mm_dim)
self.linear1 = nn.Linear(input_dims[1], mm_dim)
self.linear_out = nn.Linear(mm_dim, output_dim)
self.n_params = sum(p.numel() for p in self.parameters() if p.
requires_grad)
def forward(self, x):
x0 = self.linear0(x[0])
x1 = self.linear1(x[1])
if self.activ_input:
x0 = getattr(F, self.activ_input)(x0)
x1 = getattr(F, self.activ_input)(x1)
if self.dropout_input > 0:
x0 = F.dropout(x0, p=self.dropout_input, training=self.training)
x1 = F.dropout(x1, p=self.dropout_input, training=self.training)
z = x0 + x1
if self.normalize:
z = torch.sqrt(F.relu(z)) - torch.sqrt(F.relu(-z))
z = F.normalize(z, p=2)
if self.dropout_pre_lin > 0:
z = F.dropout(z, p=self.dropout_pre_lin, training=self.training)
z = self.linear_out(z)
if self.activ_output:
z = getattr(F, self.activ_output)(z)
if self.dropout_output > 0:
z = F.dropout(z, p=self.dropout_output, training=self.training)
return z
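# Added hedged usage sketch (illustrative sizes; the helper name is not part of the
# upstream API): LinearSum projects both inputs to mm_dim, applies the input
# activation, sums them, and projects the result to output_dim.
def _example_linear_sum():
    fusion = LinearSum([8, 6], 5, mm_dim=32)
    a, b = torch.rand(4, 8), torch.rand(4, 6)
    out = fusion([a, b])
    return out.shape == (4, 5)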
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dims': [4, 4], 'output_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_0(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.
constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 1200
x1 = xindex // 1200
tmp0 = tl.load(in_ptr0 + (x0 + 1216 * x1), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x0 + 1216 * x1), xmask)
tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp7 = tmp5 + tmp6
tmp8 = triton_helpers.maximum(tmp3, tmp7)
tmp9 = tmp4 + tmp8
tmp10 = 0.0
tmp11 = tmp8 <= tmp10
tmp12 = tmp4 <= tmp10
tl.store(out_ptr0 + (x0 + 1216 * x1), tmp9, xmask)
tl.store(out_ptr1 + (x0 + 1280 * x1), tmp11, xmask)
tl.store(out_ptr2 + (x0 + 1280 * x1), tmp12, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1200, 4), (4, 1))
assert_size_stride(primals_3, (1200,), (1,))
assert_size_stride(primals_4, (1200, 4), (4, 1))
assert_size_stride(primals_5, (1200,), (1,))
assert_size_stride(primals_6, (4, 1200), (1200, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 1200), (1216, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 1200), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 1200), (1216, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 64
), reinterpret_tensor(primals_4, (4, 1200), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((4, 4, 1200), (4864, 1216, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4, 1200), (5120, 1280, 1), torch.bool)
buf7 = empty_strided_cuda((4, 4, 1200), (5120, 1280, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_add_relu_threshold_backward_0[grid(19200)](buf0,
primals_3, buf1, primals_5, buf2, buf6, buf7, 19200, XBLOCK=128,
num_warps=4, num_stages=1)
del buf0
del buf1
del primals_3
del primals_5
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 1200), (1216, 1), 0
), reinterpret_tensor(primals_6, (1200, 4), (1, 1200), 0), out=buf3
)
buf4 = reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0)
del buf3
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(64)](buf4,
primals_7, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_7
return buf4, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_1, (16, 4), (4, 1), 64
), reinterpret_tensor(buf2, (16, 1200), (1216, 1), 0
), buf5, primals_6, buf6, buf7
class LinearSumNew(nn.Module):
def __init__(self, input_dims, output_dim, mm_dim=1200, activ_input=
'relu', activ_output='relu', normalize=False, dropout_input=0.0,
dropout_pre_lin=0.0, dropout_output=0.0):
super(LinearSumNew, self).__init__()
self.input_dims = input_dims
self.output_dim = output_dim
self.mm_dim = mm_dim
self.activ_input = activ_input
self.activ_output = activ_output
self.normalize = normalize
self.dropout_input = dropout_input
self.dropout_pre_lin = dropout_pre_lin
self.dropout_output = dropout_output
self.linear0 = nn.Linear(input_dims[0], mm_dim)
self.linear1 = nn.Linear(input_dims[1], mm_dim)
self.linear_out = nn.Linear(mm_dim, output_dim)
self.n_params = sum(p.numel() for p in self.parameters() if p.
requires_grad)
def forward(self, input_0):
primals_2 = self.linear0.weight
primals_3 = self.linear0.bias
primals_4 = self.linear1.weight
primals_5 = self.linear1.bias
primals_6 = self.linear_out.weight
primals_7 = self.linear_out.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
JoannaLXY/block.bootstrap.pytorch
|
LinearSum
| false
| 11,560
|
[
"BSD-3-Clause"
] | 0
|
42c3e7616b704e05c6ff2376ff68b5b18044fe77
|
https://github.com/JoannaLXY/block.bootstrap.pytorch/tree/42c3e7616b704e05c6ff2376ff68b5b18044fe77
|
MFB
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class MFB(nn.Module):
def __init__(self, input_dims, output_dim, mm_dim=1200, factor=2,
activ_input='relu', activ_output='relu', normalize=False,
dropout_input=0.0, dropout_pre_norm=0.0, dropout_output=0.0):
super(MFB, self).__init__()
self.input_dims = input_dims
self.mm_dim = mm_dim
self.factor = factor
self.output_dim = output_dim
self.activ_input = activ_input
self.activ_output = activ_output
self.normalize = normalize
self.dropout_input = dropout_input
self.dropout_pre_norm = dropout_pre_norm
self.dropout_output = dropout_output
self.linear0 = nn.Linear(input_dims[0], mm_dim * factor)
self.linear1 = nn.Linear(input_dims[1], mm_dim * factor)
self.linear_out = nn.Linear(mm_dim, output_dim)
self.n_params = sum(p.numel() for p in self.parameters() if p.
requires_grad)
def forward(self, x):
x0 = self.linear0(x[0])
x1 = self.linear1(x[1])
if self.activ_input:
x0 = getattr(F, self.activ_input)(x0)
x1 = getattr(F, self.activ_input)(x1)
if self.dropout_input > 0:
x0 = F.dropout(x0, p=self.dropout_input, training=self.training)
x1 = F.dropout(x1, p=self.dropout_input, training=self.training)
z = x0 * x1
if self.dropout_pre_norm > 0:
z = F.dropout(z, p=self.dropout_pre_norm, training=self.training)
z = z.view(z.size(0), self.mm_dim, self.factor)
z = z.sum(2)
if self.normalize:
z = torch.sqrt(F.relu(z)) - torch.sqrt(F.relu(-z))
z = F.normalize(z, p=2)
z = self.linear_out(z)
if self.activ_output:
z = getattr(F, self.activ_output)(z)
if self.dropout_output > 0:
z = F.dropout(z, p=self.dropout_output, training=self.training)
return z
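# Added hedged usage sketch (illustrative sizes; the helper name is not part of the
# upstream API): MFB multiplies the two projections element-wise and sum-pools groups
# of `factor` units down to mm_dim before the output projection.
def _example_mfb():
    fusion = MFB([8, 6], 5, mm_dim=16, factor=2)
    a, b = torch.rand(3, 8), torch.rand(3, 6)
    out = fusion([a, b])
    return out.shape == (3, 5)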
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_dims': [4, 4], 'output_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 4800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 1200
x1 = xindex // 1200
tmp0 = tl.load(in_ptr0 + 2 * x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 2 * x2, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 2 * x2), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (1 + 2 * x2), xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp5 = tmp2 * tmp4
tmp7 = triton_helpers.maximum(tmp1, tmp6)
tmp9 = triton_helpers.maximum(tmp1, tmp8)
tmp10 = tmp7 * tmp9
tmp11 = tmp5 + tmp10
tl.store(out_ptr0 + (x0 + 1216 * x1), tmp11, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (2400, 4), (4, 1))
assert_size_stride(primals_3, (2400,), (1,))
assert_size_stride(primals_4, (2400, 4), (4, 1))
assert_size_stride(primals_5, (2400,), (1,))
assert_size_stride(primals_6, (4, 1200), (1200, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2400), (2400, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (4, 4
), (4, 1), 0), reinterpret_tensor(primals_2, (4, 2400), (1, 4),
0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 2400), (2400, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (4, 4
), (4, 1), 16), reinterpret_tensor(primals_4, (4, 2400), (1, 4),
0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 1200), (1216, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_sum_0[grid(4800)](buf0, buf1, buf2, 4800, XBLOCK=
256, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_6, (1200, 4), (1,
1200), 0), out=buf3)
buf4 = buf3
del buf3
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(16)](buf4,
primals_7, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_7
return buf4, reinterpret_tensor(primals_1, (4, 4), (4, 1), 0
), buf0, reinterpret_tensor(primals_1, (4, 4), (4, 1), 16
), buf1, buf2, buf5, primals_6
class MFBNew(nn.Module):
def __init__(self, input_dims, output_dim, mm_dim=1200, factor=2,
activ_input='relu', activ_output='relu', normalize=False,
dropout_input=0.0, dropout_pre_norm=0.0, dropout_output=0.0):
super(MFBNew, self).__init__()
self.input_dims = input_dims
self.mm_dim = mm_dim
self.factor = factor
self.output_dim = output_dim
self.activ_input = activ_input
self.activ_output = activ_output
self.normalize = normalize
self.dropout_input = dropout_input
self.dropout_pre_norm = dropout_pre_norm
self.dropout_output = dropout_output
self.linear0 = nn.Linear(input_dims[0], mm_dim * factor)
self.linear1 = nn.Linear(input_dims[1], mm_dim * factor)
self.linear_out = nn.Linear(mm_dim, output_dim)
self.n_params = sum(p.numel() for p in self.parameters() if p.
requires_grad)
def forward(self, input_0):
primals_2 = self.linear0.weight
primals_3 = self.linear0.bias
primals_4 = self.linear1.weight
primals_5 = self.linear1.bias
primals_6 = self.linear_out.weight
primals_7 = self.linear_out.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
JoannaLXY/block.bootstrap.pytorch
|
MFB
| false
| 11,561
|
[
"BSD-3-Clause"
] | 0
|
42c3e7616b704e05c6ff2376ff68b5b18044fe77
|
https://github.com/JoannaLXY/block.bootstrap.pytorch/tree/42c3e7616b704e05c6ff2376ff68b5b18044fe77
|
MFH
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class MFH(nn.Module):
def __init__(self, input_dims, output_dim, mm_dim=1200, factor=2,
activ_input='relu', activ_output='relu', normalize=False,
dropout_input=0.0, dropout_pre_lin=0.0, dropout_output=0.0):
super(MFH, self).__init__()
self.input_dims = input_dims
self.output_dim = output_dim
self.mm_dim = mm_dim
self.factor = factor
self.activ_input = activ_input
self.activ_output = activ_output
self.normalize = normalize
self.dropout_input = dropout_input
self.dropout_pre_lin = dropout_pre_lin
self.dropout_output = dropout_output
self.linear0_0 = nn.Linear(input_dims[0], mm_dim * factor)
self.linear1_0 = nn.Linear(input_dims[1], mm_dim * factor)
self.linear0_1 = nn.Linear(input_dims[0], mm_dim * factor)
self.linear1_1 = nn.Linear(input_dims[1], mm_dim * factor)
self.linear_out = nn.Linear(mm_dim * 2, output_dim)
self.n_params = sum(p.numel() for p in self.parameters() if p.
requires_grad)
def forward(self, x):
x0 = self.linear0_0(x[0])
x1 = self.linear1_0(x[1])
if self.activ_input:
x0 = getattr(F, self.activ_input)(x0)
x1 = getattr(F, self.activ_input)(x1)
if self.dropout_input > 0:
x0 = F.dropout(x0, p=self.dropout_input, training=self.training)
x1 = F.dropout(x1, p=self.dropout_input, training=self.training)
z_0_skip = x0 * x1
if self.dropout_pre_lin:
z_0_skip = F.dropout(z_0_skip, p=self.dropout_pre_lin, training
=self.training)
z_0 = z_0_skip.view(z_0_skip.size(0), self.mm_dim, self.factor)
z_0 = z_0.sum(2)
if self.normalize:
z_0 = torch.sqrt(F.relu(z_0)) - torch.sqrt(F.relu(-z_0))
z_0 = F.normalize(z_0, p=2)
x0 = self.linear0_1(x[0])
x1 = self.linear1_1(x[1])
if self.activ_input:
x0 = getattr(F, self.activ_input)(x0)
x1 = getattr(F, self.activ_input)(x1)
if self.dropout_input > 0:
x0 = F.dropout(x0, p=self.dropout_input, training=self.training)
x1 = F.dropout(x1, p=self.dropout_input, training=self.training)
z_1 = x0 * x1 * z_0_skip
if self.dropout_pre_lin > 0:
z_1 = F.dropout(z_1, p=self.dropout_pre_lin, training=self.training
)
z_1 = z_1.view(z_1.size(0), self.mm_dim, self.factor)
z_1 = z_1.sum(2)
if self.normalize:
z_1 = torch.sqrt(F.relu(z_1)) - torch.sqrt(F.relu(-z_1))
z_1 = F.normalize(z_1, p=2)
cat_dim = z_0.dim() - 1
z = torch.cat([z_0, z_1], cat_dim)
z = self.linear_out(z)
if self.activ_output:
z = getattr(F, self.activ_output)(z)
if self.dropout_output > 0:
z = F.dropout(z, p=self.dropout_output, training=self.training)
return z
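# Added hedged usage sketch (illustrative sizes; the helper name is not part of the
# upstream API): MFH chains two MFB-style stages, reusing the first-stage product as a
# skip term, and concatenates both pooled results before the output projection (hence
# linear_out takes mm_dim * 2 inputs).
def _example_mfh():
    fusion = MFH([8, 6], 5, mm_dim=16, factor=2)
    a, b = torch.rand(3, 8), torch.rand(3, 6)
    out = fusion([a, b])
    return out.shape == (3, 5)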
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_dims': [4, 4], 'output_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 9600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2400
x1 = xindex // 2400
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1200, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (2 * x0 + 2400 * x1), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tl.full([1], 0, tl.int32)
tmp7 = triton_helpers.maximum(tmp6, tmp5)
tmp8 = tl.load(in_ptr1 + (2 * x0 + 2400 * x1), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp9 = triton_helpers.maximum(tmp6, tmp8)
tmp10 = tmp7 * tmp9
tmp11 = tl.load(in_ptr0 + (1 + 2 * x0 + 2400 * x1), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp12 = triton_helpers.maximum(tmp6, tmp11)
tmp13 = tl.load(in_ptr1 + (1 + 2 * x0 + 2400 * x1), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp14 = triton_helpers.maximum(tmp6, tmp13)
tmp15 = tmp12 * tmp14
tmp16 = tmp10 + tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp4, tmp16, tmp17)
tmp19 = tmp0 >= tmp3
tl.full([1], 2400, tl.int64)
tmp22 = tl.load(in_ptr2 + (2 * (-1200 + x0) + 2400 * x1), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp23 = triton_helpers.maximum(tmp6, tmp22)
tmp24 = tl.load(in_ptr3 + (2 * (-1200 + x0) + 2400 * x1), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp25 = triton_helpers.maximum(tmp6, tmp24)
tmp26 = tmp23 * tmp25
tmp27 = tl.load(in_ptr0 + (2 * (-1200 + x0) + 2400 * x1), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp28 = triton_helpers.maximum(tmp6, tmp27)
tmp29 = tl.load(in_ptr1 + (2 * (-1200 + x0) + 2400 * x1), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp30 = triton_helpers.maximum(tmp6, tmp29)
tmp31 = tmp28 * tmp30
tmp32 = tmp26 * tmp31
tmp33 = tl.load(in_ptr2 + (1 + 2 * (-1200 + x0) + 2400 * x1), tmp19 &
xmask, eviction_policy='evict_last', other=0.0)
tmp34 = triton_helpers.maximum(tmp6, tmp33)
tmp35 = tl.load(in_ptr3 + (1 + 2 * (-1200 + x0) + 2400 * x1), tmp19 &
xmask, eviction_policy='evict_last', other=0.0)
tmp36 = triton_helpers.maximum(tmp6, tmp35)
tmp37 = tmp34 * tmp36
tmp38 = tl.load(in_ptr0 + (1 + 2 * (-1200 + x0) + 2400 * x1), tmp19 &
xmask, eviction_policy='evict_last', other=0.0)
tmp39 = triton_helpers.maximum(tmp6, tmp38)
tmp40 = tl.load(in_ptr1 + (1 + 2 * (-1200 + x0) + 2400 * x1), tmp19 &
xmask, eviction_policy='evict_last', other=0.0)
tmp41 = triton_helpers.maximum(tmp6, tmp40)
tmp42 = tmp39 * tmp41
tmp43 = tmp37 * tmp42
tmp44 = tmp32 + tmp43
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp19, tmp44, tmp45)
tmp47 = tl.where(tmp4, tmp18, tmp46)
tl.store(out_ptr0 + x2, tmp47, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (2400, 4), (4, 1))
assert_size_stride(primals_3, (2400,), (1,))
assert_size_stride(primals_4, (2400, 4), (4, 1))
assert_size_stride(primals_5, (2400,), (1,))
assert_size_stride(primals_6, (2400, 4), (4, 1))
assert_size_stride(primals_7, (2400,), (1,))
assert_size_stride(primals_8, (2400, 4), (4, 1))
assert_size_stride(primals_9, (2400,), (1,))
assert_size_stride(primals_10, (4, 2400), (2400, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2400), (2400, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (4, 4
), (4, 1), 0), reinterpret_tensor(primals_2, (4, 2400), (1, 4),
0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 2400), (2400, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (4, 4
), (4, 1), 16), reinterpret_tensor(primals_4, (4, 2400), (1, 4),
0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 2400), (2400, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(primals_1, (4, 4
), (4, 1), 0), reinterpret_tensor(primals_6, (4, 2400), (1, 4),
0), alpha=1, beta=1, out=buf2)
del primals_6
del primals_7
buf3 = empty_strided_cuda((4, 2400), (2400, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(primals_1, (4, 4
), (4, 1), 16), reinterpret_tensor(primals_8, (4, 2400), (1, 4),
0), alpha=1, beta=1, out=buf3)
del primals_8
del primals_9
buf4 = empty_strided_cuda((4, 2400), (2400, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(9600)](buf0, buf1, buf2, buf3, buf4,
9600, XBLOCK=128, num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf4, reinterpret_tensor(primals_10, (2400, 4), (
1, 2400), 0), out=buf5)
buf6 = buf5
del buf5
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(16)](buf6,
primals_11, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_11
return buf6, reinterpret_tensor(primals_1, (4, 4), (4, 1), 0
), buf0, reinterpret_tensor(primals_1, (4, 4), (4, 1), 16
), buf1, buf2, buf3, buf4, buf7, primals_10
class MFHNew(nn.Module):
def __init__(self, input_dims, output_dim, mm_dim=1200, factor=2,
activ_input='relu', activ_output='relu', normalize=False,
dropout_input=0.0, dropout_pre_lin=0.0, dropout_output=0.0):
super(MFHNew, self).__init__()
self.input_dims = input_dims
self.output_dim = output_dim
self.mm_dim = mm_dim
self.factor = factor
self.activ_input = activ_input
self.activ_output = activ_output
self.normalize = normalize
self.dropout_input = dropout_input
self.dropout_pre_lin = dropout_pre_lin
self.dropout_output = dropout_output
self.linear0_0 = nn.Linear(input_dims[0], mm_dim * factor)
self.linear1_0 = nn.Linear(input_dims[1], mm_dim * factor)
self.linear0_1 = nn.Linear(input_dims[0], mm_dim * factor)
self.linear1_1 = nn.Linear(input_dims[1], mm_dim * factor)
self.linear_out = nn.Linear(mm_dim * 2, output_dim)
self.n_params = sum(p.numel() for p in self.parameters() if p.
requires_grad)
def forward(self, input_0):
primals_2 = self.linear0_0.weight
primals_3 = self.linear0_0.bias
primals_4 = self.linear1_0.weight
primals_5 = self.linear1_0.bias
primals_6 = self.linear0_1.weight
primals_7 = self.linear0_1.bias
primals_8 = self.linear1_1.weight
primals_9 = self.linear1_1.bias
primals_10 = self.linear_out.weight
primals_11 = self.linear_out.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
JoannaLXY/block.bootstrap.pytorch
|
MFH
| false
| 11,562
|
[
"BSD-3-Clause"
] | 0
|
42c3e7616b704e05c6ff2376ff68b5b18044fe77
|
https://github.com/JoannaLXY/block.bootstrap.pytorch/tree/42c3e7616b704e05c6ff2376ff68b5b18044fe77
|
BinaryExpAbs
|
import abc
import inspect
import torch
import warnings
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import Any
from typing import *
from pathlib import Path
def get_module_name(cls_or_func):
module_name = cls_or_func.__module__
if module_name == '__main__':
for frm in inspect.stack():
if inspect.getmodule(frm[0]).__name__ == '__main__':
main_file_path = Path(inspect.getsourcefile(frm[0]))
if not Path().samefile(main_file_path.parent):
raise RuntimeError(
f'You are using "{main_file_path}" to launch your experiment, please launch the experiment under the directory where "{main_file_path.name}" is located.'
)
module_name = main_file_path.stem
break
if module_name == '__main__':
warnings.warn(
'Callstack exhausted but main module still not found. This will probably cause issues that the function/class cannot be imported.'
)
if (f'{cls_or_func.__module__}.{cls_or_func.__name__}' ==
'torch.nn.modules.rnn.LSTM'):
module_name = cls_or_func.__module__
return module_name
def reset_uid(namespace: 'str'='default') ->None:
_last_uid[namespace] = 0
def _create_wrapper_cls(cls, store_init_parameters=True, reset_mutation_uid
=False, stop_parsing=True):
class wrapper(cls):
def __init__(self, *args, **kwargs):
self._stop_parsing = stop_parsing
if reset_mutation_uid:
reset_uid('mutation')
if store_init_parameters:
argname_list = list(inspect.signature(cls.__init__).
parameters.keys())[1:]
full_args = {}
full_args.update(kwargs)
assert len(args) <= len(argname_list
), f'Length of {args} is greater than length of {argname_list}.'
for argname, value in zip(argname_list, args):
full_args[argname] = value
args = list(args)
for i, value in enumerate(args):
if isinstance(value, Translatable):
args[i] = value._translate()
for i, value in kwargs.items():
if isinstance(value, Translatable):
kwargs[i] = value._translate()
self._init_parameters = full_args
else:
self._init_parameters = {}
super().__init__(*args, **kwargs)
wrapper.__module__ = get_module_name(cls)
wrapper.__name__ = cls.__name__
wrapper.__qualname__ = cls.__qualname__
wrapper.__init__.__doc__ = cls.__init__.__doc__
return wrapper
def serialize_cls(cls):
"""
To create a serializable class.
"""
return _create_wrapper_cls(cls)
def basic_unit(cls):
"""
To wrap a module as a basic unit, to stop it from parsing and make it mutate-able.
"""
import torch.nn as nn
assert issubclass(cls, nn.Module
), 'When using @basic_unit, the class must be a subclass of nn.Module.'
return serialize_cls(cls)
class Translatable(abc.ABC):
"""
Inherit this class and implement ``_translate`` when the inner class needs a different
parameter from the wrapper class in its init function.
"""
@abc.abstractmethod
def _translate(self) ->Any:
pass
@basic_unit
class BinaryExpAbs(nn.Module):
def __init__(self):
super().__init__()
self.beta = torch.nn.Parameter(torch.tensor(1, dtype=torch.float32))
def forward(self, x):
return torch.exp(-self.beta * torch.abs(x[0] - x[1]))
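# Added hedged usage sketch (illustrative only; the helper name is not part of the
# upstream API): the module consumes a stacked pair and returns exp(-beta * |x[0] - x[1]|);
# with the initial beta of 1, inputs that differ by exactly 1 everywhere map to exp(-1).
def _example_binary_exp_abs():
    pair = torch.stack([torch.zeros(2, 3), torch.ones(2, 3)])
    sim = BinaryExpAbs()(pair)
    return torch.allclose(sim, torch.full((2, 3), 0.36787944117144233))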
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import abc
import inspect
import warnings
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import Any
from typing import *
from pathlib import Path
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_abs_exp_mul_neg_sub_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp6 = -tmp5
tmp7 = tmp6 * tmp3
tmp8 = tl_math.exp(tmp7)
tl.store(out_ptr0 + x0, tmp3, xmask)
tl.store(out_ptr1 + x0, tmp8, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_abs_exp_mul_neg_sub_0[grid(64)](primals_2,
primals_1, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_1
del primals_2
return buf1, buf0, buf1
def get_module_name(cls_or_func):
module_name = cls_or_func.__module__
if module_name == '__main__':
for frm in inspect.stack():
if inspect.getmodule(frm[0]).__name__ == '__main__':
main_file_path = Path(inspect.getsourcefile(frm[0]))
if not Path().samefile(main_file_path.parent):
raise RuntimeError(
f'You are using "{main_file_path}" to launch your experiment, please launch the experiment under the directory where "{main_file_path.name}" is located.'
)
module_name = main_file_path.stem
break
if module_name == '__main__':
warnings.warn(
'Callstack exhausted but main module still not found. This will probably cause issues that the function/class cannot be imported.'
)
if (f'{cls_or_func.__module__}.{cls_or_func.__name__}' ==
'torch.nn.modules.rnn.LSTM'):
module_name = cls_or_func.__module__
return module_name
def reset_uid(namespace: 'str'='default') ->None:
_last_uid[namespace] = 0
def _create_wrapper_cls(cls, store_init_parameters=True, reset_mutation_uid
=False, stop_parsing=True):
class wrapper(cls):
def __init__(self, *args, **kwargs):
self._stop_parsing = stop_parsing
if reset_mutation_uid:
reset_uid('mutation')
if store_init_parameters:
argname_list = list(inspect.signature(cls.__init__).
parameters.keys())[1:]
full_args = {}
full_args.update(kwargs)
assert len(args) <= len(argname_list
), f'Length of {args} is greater than length of {argname_list}.'
for argname, value in zip(argname_list, args):
full_args[argname] = value
args = list(args)
for i, value in enumerate(args):
if isinstance(value, Translatable):
args[i] = value._translate()
for i, value in kwargs.items():
if isinstance(value, Translatable):
kwargs[i] = value._translate()
self._init_parameters = full_args
else:
self._init_parameters = {}
super().__init__(*args, **kwargs)
wrapper.__module__ = get_module_name(cls)
wrapper.__name__ = cls.__name__
wrapper.__qualname__ = cls.__qualname__
wrapper.__init__.__doc__ = cls.__init__.__doc__
return wrapper
def serialize_cls(cls):
"""
To create a serializable class.
"""
return _create_wrapper_cls(cls)
def basic_unit(cls):
"""
To wrap a module as a basic unit, to stop it from parsing and make it mutate-able.
"""
import torch.nn as nn
assert issubclass(cls, nn.Module
), 'When using @basic_unit, the class must be a subclass of nn.Module.'
return serialize_cls(cls)
class Translatable(abc.ABC):
"""
Inherit this class and implement ``_translate`` when the inner class needs a different
parameter from the wrapper class in its init function.
"""
@abc.abstractmethod
def _translate(self) ->Any:
pass
@basic_unit
class BinaryExpAbsNew(nn.Module):
def __init__(self):
super().__init__()
self.beta = torch.nn.Parameter(torch.tensor(1, dtype=torch.float32))
def forward(self, input_0):
primals_1 = self.beta
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
Johnsonms/NNI_master
|
BinaryExpAbs
| false
| 11,563
|
[
"MIT"
] | 0
|
e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
https://github.com/Johnsonms/NNI_master/tree/e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
BinaryMul
|
import abc
import inspect
import torch
import warnings
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import Any
from typing import *
from pathlib import Path
def get_module_name(cls_or_func):
module_name = cls_or_func.__module__
if module_name == '__main__':
for frm in inspect.stack():
if inspect.getmodule(frm[0]).__name__ == '__main__':
main_file_path = Path(inspect.getsourcefile(frm[0]))
if not Path().samefile(main_file_path.parent):
raise RuntimeError(
f'You are using "{main_file_path}" to launch your experiment, please launch the experiment under the directory where "{main_file_path.name}" is located.'
)
module_name = main_file_path.stem
break
if module_name == '__main__':
warnings.warn(
'Callstack exhausted but main module still not found. This will probably cause issues that the function/class cannot be imported.'
)
if (f'{cls_or_func.__module__}.{cls_or_func.__name__}' ==
'torch.nn.modules.rnn.LSTM'):
module_name = cls_or_func.__module__
return module_name
def reset_uid(namespace: 'str'='default') ->None:
_last_uid[namespace] = 0
def _create_wrapper_cls(cls, store_init_parameters=True, reset_mutation_uid
=False, stop_parsing=True):
class wrapper(cls):
def __init__(self, *args, **kwargs):
self._stop_parsing = stop_parsing
if reset_mutation_uid:
reset_uid('mutation')
if store_init_parameters:
argname_list = list(inspect.signature(cls.__init__).
parameters.keys())[1:]
full_args = {}
full_args.update(kwargs)
assert len(args) <= len(argname_list
), f'Length of {args} is greater than length of {argname_list}.'
for argname, value in zip(argname_list, args):
full_args[argname] = value
args = list(args)
for i, value in enumerate(args):
if isinstance(value, Translatable):
args[i] = value._translate()
for i, value in kwargs.items():
if isinstance(value, Translatable):
kwargs[i] = value._translate()
self._init_parameters = full_args
else:
self._init_parameters = {}
super().__init__(*args, **kwargs)
wrapper.__module__ = get_module_name(cls)
wrapper.__name__ = cls.__name__
wrapper.__qualname__ = cls.__qualname__
wrapper.__init__.__doc__ = cls.__init__.__doc__
return wrapper
def serialize_cls(cls):
"""
To create a serializable class.
"""
return _create_wrapper_cls(cls)
def basic_unit(cls):
"""
To wrap a module as a basic unit, to stop it from parsing and make it mutate-able.
"""
import torch.nn as nn
assert issubclass(cls, nn.Module
), 'When using @basic_unit, the class must be a subclass of nn.Module.'
return serialize_cls(cls)
class Translatable(abc.ABC):
"""
Inherit this class and implement ``_translate`` when the inner class needs a different
parameter from the wrapper class in its init function.
"""
@abc.abstractmethod
def _translate(self) ->Any:
pass
@basic_unit
class BinaryMul(nn.Module):
def forward(self, x):
return x[0] * x[1]
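# Added hedged usage sketch (illustrative only; the helper name is not part of the
# upstream API): the module multiplies the two stacked operands element-wise, so
# constant inputs 3 and 0.5 give exactly 1.5.
def _example_binary_mul():
    pair = torch.stack([torch.full((2, 2), 3.0), torch.full((2, 2), 0.5)])
    return torch.equal(BinaryMul()(pair), torch.full((2, 2), 1.5))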
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import abc
import inspect
import warnings
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import Any
from typing import *
from pathlib import Path
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
def get_module_name(cls_or_func):
module_name = cls_or_func.__module__
if module_name == '__main__':
for frm in inspect.stack():
if inspect.getmodule(frm[0]).__name__ == '__main__':
main_file_path = Path(inspect.getsourcefile(frm[0]))
if not Path().samefile(main_file_path.parent):
raise RuntimeError(
f'You are using "{main_file_path}" to launch your experiment, please launch the experiment under the directory where "{main_file_path.name}" is located.'
)
module_name = main_file_path.stem
break
if module_name == '__main__':
warnings.warn(
'Callstack exhausted but main module still not found. This will probably cause issues that the function/class cannot be imported.'
)
if (f'{cls_or_func.__module__}.{cls_or_func.__name__}' ==
'torch.nn.modules.rnn.LSTM'):
module_name = cls_or_func.__module__
return module_name
def reset_uid(namespace: 'str'='default') ->None:
_last_uid[namespace] = 0
def _create_wrapper_cls(cls, store_init_parameters=True, reset_mutation_uid
=False, stop_parsing=True):
class wrapper(cls):
def __init__(self, *args, **kwargs):
self._stop_parsing = stop_parsing
if reset_mutation_uid:
reset_uid('mutation')
if store_init_parameters:
argname_list = list(inspect.signature(cls.__init__).
parameters.keys())[1:]
full_args = {}
full_args.update(kwargs)
assert len(args) <= len(argname_list
), f'Length of {args} is greater than length of {argname_list}.'
for argname, value in zip(argname_list, args):
full_args[argname] = value
args = list(args)
for i, value in enumerate(args):
if isinstance(value, Translatable):
args[i] = value._translate()
for i, value in kwargs.items():
if isinstance(value, Translatable):
kwargs[i] = value._translate()
self._init_parameters = full_args
else:
self._init_parameters = {}
super().__init__(*args, **kwargs)
wrapper.__module__ = get_module_name(cls)
wrapper.__name__ = cls.__name__
wrapper.__qualname__ = cls.__qualname__
wrapper.__init__.__doc__ = cls.__init__.__doc__
return wrapper
def serialize_cls(cls):
"""
To create a serializable class.
"""
return _create_wrapper_cls(cls)
def basic_unit(cls):
"""
To wrap a module as a basic unit, to stop it from parsing and make it mutate-able.
"""
import torch.nn as nn
assert issubclass(cls, nn.Module
), 'When using @basic_unit, the class must be a subclass of nn.Module.'
return serialize_cls(cls)
class Translatable(abc.ABC):
"""
Inherit this class and implement ``_translate`` when the inner class needs a different
parameter from the wrapper class in its init function.
"""
@abc.abstractmethod
def _translate(self) ->Any:
pass
@basic_unit
class BinaryMulNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Johnsonms/NNI_master
|
BinaryMul
| false
| 11,564
|
[
"MIT"
] | 0
|
e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
https://github.com/Johnsonms/NNI_master/tree/e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
NetVLAD
|
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from sklearn.neighbors import NearestNeighbors
class NetVLAD(nn.Module):
"""NetVLAD layer implementation"""
def __init__(self, num_clusters=64, dim=128, normalize_input=True,
vladv2=False):
"""
Args:
num_clusters : int
The number of clusters
dim : int
Dimension of descriptors
alpha : float
Parameter of initialization. Larger value is harder assignment.
normalize_input : bool
If true, descriptor-wise L2 normalization is applied to input.
vladv2 : bool
If true, use vladv2 otherwise use vladv1
"""
super(NetVLAD, self).__init__()
self.num_clusters = num_clusters
self.dim = dim
self.alpha = 0
self.vladv2 = vladv2
self.normalize_input = normalize_input
self.conv = nn.Conv2d(dim, num_clusters, kernel_size=(1, 1), bias=
vladv2)
self.centroids = nn.Parameter(torch.rand(num_clusters, dim))
def init_params(self, clsts, traindescs):
if self.vladv2 is False:
clstsAssign = clsts / np.linalg.norm(clsts, axis=1, keepdims=True)
dots = np.dot(clstsAssign, traindescs.T)
dots.sort(0)
dots = dots[::-1, :]
self.alpha = (-np.log(0.01) / np.mean(dots[0, :] - dots[1, :])
).item()
self.centroids = nn.Parameter(torch.from_numpy(clsts))
self.conv.weight = nn.Parameter(torch.from_numpy(self.alpha *
clstsAssign).unsqueeze(2).unsqueeze(3))
self.conv.bias = None
else:
knn = NearestNeighbors(n_jobs=-1)
knn.fit(traindescs)
del traindescs
dsSq = np.square(knn.kneighbors(clsts, 2)[1])
del knn
self.alpha = (-np.log(0.01) / np.mean(dsSq[:, 1] - dsSq[:, 0])
).item()
self.centroids = nn.Parameter(torch.from_numpy(clsts))
del clsts, dsSq
self.conv.weight = nn.Parameter((2.0 * self.alpha * self.
centroids).unsqueeze(-1).unsqueeze(-1))
self.conv.bias = nn.Parameter(-self.alpha * self.centroids.norm
(dim=1))
def forward(self, x):
N, C = x.shape[:2]
if self.normalize_input:
x = F.normalize(x, p=2, dim=1)
soft_assign = self.conv(x).view(N, self.num_clusters, -1)
soft_assign = F.softmax(soft_assign, dim=1)
x_flatten = x.view(N, C, -1)
vlad = torch.zeros([N, self.num_clusters, C], dtype=x.dtype, layout
=x.layout, device=x.device)
for C in range(self.num_clusters):
residual = x_flatten.unsqueeze(0).permute(1, 0, 2, 3
) - self.centroids[C:C + 1, :].expand(x_flatten.size(-1), -
1, -1).permute(1, 2, 0).unsqueeze(0)
residual *= soft_assign[:, C:C + 1, :].unsqueeze(2)
vlad[:, C:C + 1, :] = residual.sum(dim=-1)
vlad = F.normalize(vlad, p=2, dim=2)
vlad = vlad.view(x.size(0), -1)
vlad = F.normalize(vlad, p=2, dim=1)
return vlad
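# Added hedged usage sketch (illustrative sizes; the helper name is not part of the
# upstream API): the layer turns an (N, C, H, W) feature map into an
# N x (num_clusters * dim) descriptor, intra-normalised per cluster and then
# L2-normalised as a whole.
def _example_netvlad():
    layer = NetVLAD(num_clusters=8, dim=16)
    feats = torch.rand(2, 16, 4, 4)
    desc = layer(feats)
    return desc.shape == (2, 8 * 16)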
def get_inputs():
return [torch.rand([4, 128, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import numpy as np
import torch.nn as nn
from sklearn.neighbors import NearestNeighbors
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_red_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 4096
x1 = xindex // 4096
_tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x3 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (x0 + 4096 * r2 + 524288 * x1), rmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = _tmp3 + tmp2
_tmp3 = tl.where(rmask, tmp4, _tmp3)
tmp3 = tl.sum(_tmp3, 1)[:, None]
tl.store(out_ptr0 + x3, tmp3, None)
@triton.jit
def triton_poi_fused_div_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7,
out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13,
out_ptr14, out_ptr15, out_ptr16, out_ptr17, out_ptr18, out_ptr19,
out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24, out_ptr25,
out_ptr26, out_ptr27, out_ptr28, out_ptr29, out_ptr30, out_ptr31,
out_ptr32, out_ptr33, out_ptr34, out_ptr35, out_ptr36, out_ptr37,
out_ptr38, out_ptr39, out_ptr40, out_ptr41, out_ptr42, out_ptr43,
out_ptr44, out_ptr45, out_ptr46, out_ptr47, out_ptr48, out_ptr49,
out_ptr50, out_ptr51, out_ptr52, out_ptr53, out_ptr54, out_ptr55,
out_ptr56, out_ptr57, out_ptr58, out_ptr59, out_ptr60, out_ptr61,
out_ptr62, out_ptr63, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 4096
x2 = xindex // 524288
x1 = xindex // 4096 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (x0 + 4096 * x2), None, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr2 + (128 + x1), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (256 + x1), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (384 + x1), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + (512 + x1), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr2 + (640 + x1), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr2 + (768 + x1), None, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr2 + (896 + x1), None, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + (1024 + x1), None, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr2 + (1152 + x1), None, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr2 + (1280 + x1), None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + (1408 + x1), None, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + (1536 + x1), None, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + (1664 + x1), None, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr2 + (1792 + x1), None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr2 + (1920 + x1), None, eviction_policy='evict_last')
tmp36 = tl.load(in_ptr2 + (2048 + x1), None, eviction_policy='evict_last')
tmp38 = tl.load(in_ptr2 + (2176 + x1), None, eviction_policy='evict_last')
tmp40 = tl.load(in_ptr2 + (2304 + x1), None, eviction_policy='evict_last')
tmp42 = tl.load(in_ptr2 + (2432 + x1), None, eviction_policy='evict_last')
tmp44 = tl.load(in_ptr2 + (2560 + x1), None, eviction_policy='evict_last')
tmp46 = tl.load(in_ptr2 + (2688 + x1), None, eviction_policy='evict_last')
tmp48 = tl.load(in_ptr2 + (2816 + x1), None, eviction_policy='evict_last')
tmp50 = tl.load(in_ptr2 + (2944 + x1), None, eviction_policy='evict_last')
tmp52 = tl.load(in_ptr2 + (3072 + x1), None, eviction_policy='evict_last')
tmp54 = tl.load(in_ptr2 + (3200 + x1), None, eviction_policy='evict_last')
tmp56 = tl.load(in_ptr2 + (3328 + x1), None, eviction_policy='evict_last')
tmp58 = tl.load(in_ptr2 + (3456 + x1), None, eviction_policy='evict_last')
tmp60 = tl.load(in_ptr2 + (3584 + x1), None, eviction_policy='evict_last')
tmp62 = tl.load(in_ptr2 + (3712 + x1), None, eviction_policy='evict_last')
tmp64 = tl.load(in_ptr2 + (3840 + x1), None, eviction_policy='evict_last')
tmp66 = tl.load(in_ptr2 + (3968 + x1), None, eviction_policy='evict_last')
tmp68 = tl.load(in_ptr2 + (4096 + x1), None, eviction_policy='evict_last')
tmp70 = tl.load(in_ptr2 + (4224 + x1), None, eviction_policy='evict_last')
tmp72 = tl.load(in_ptr2 + (4352 + x1), None, eviction_policy='evict_last')
tmp74 = tl.load(in_ptr2 + (4480 + x1), None, eviction_policy='evict_last')
tmp76 = tl.load(in_ptr2 + (4608 + x1), None, eviction_policy='evict_last')
tmp78 = tl.load(in_ptr2 + (4736 + x1), None, eviction_policy='evict_last')
tmp80 = tl.load(in_ptr2 + (4864 + x1), None, eviction_policy='evict_last')
tmp82 = tl.load(in_ptr2 + (4992 + x1), None, eviction_policy='evict_last')
tmp84 = tl.load(in_ptr2 + (5120 + x1), None, eviction_policy='evict_last')
tmp86 = tl.load(in_ptr2 + (5248 + x1), None, eviction_policy='evict_last')
tmp88 = tl.load(in_ptr2 + (5376 + x1), None, eviction_policy='evict_last')
tmp90 = tl.load(in_ptr2 + (5504 + x1), None, eviction_policy='evict_last')
tmp92 = tl.load(in_ptr2 + (5632 + x1), None, eviction_policy='evict_last')
tmp94 = tl.load(in_ptr2 + (5760 + x1), None, eviction_policy='evict_last')
tmp96 = tl.load(in_ptr2 + (5888 + x1), None, eviction_policy='evict_last')
tmp98 = tl.load(in_ptr2 + (6016 + x1), None, eviction_policy='evict_last')
tmp100 = tl.load(in_ptr2 + (6144 + x1), None, eviction_policy='evict_last')
tmp102 = tl.load(in_ptr2 + (6272 + x1), None, eviction_policy='evict_last')
tmp104 = tl.load(in_ptr2 + (6400 + x1), None, eviction_policy='evict_last')
tmp106 = tl.load(in_ptr2 + (6528 + x1), None, eviction_policy='evict_last')
tmp108 = tl.load(in_ptr2 + (6656 + x1), None, eviction_policy='evict_last')
tmp110 = tl.load(in_ptr2 + (6784 + x1), None, eviction_policy='evict_last')
tmp112 = tl.load(in_ptr2 + (6912 + x1), None, eviction_policy='evict_last')
tmp114 = tl.load(in_ptr2 + (7040 + x1), None, eviction_policy='evict_last')
tmp116 = tl.load(in_ptr2 + (7168 + x1), None, eviction_policy='evict_last')
tmp118 = tl.load(in_ptr2 + (7296 + x1), None, eviction_policy='evict_last')
tmp120 = tl.load(in_ptr2 + (7424 + x1), None, eviction_policy='evict_last')
tmp122 = tl.load(in_ptr2 + (7552 + x1), None, eviction_policy='evict_last')
tmp124 = tl.load(in_ptr2 + (7680 + x1), None, eviction_policy='evict_last')
tmp126 = tl.load(in_ptr2 + (7808 + x1), None, eviction_policy='evict_last')
tmp128 = tl.load(in_ptr2 + (7936 + x1), None, eviction_policy='evict_last')
tmp130 = tl.load(in_ptr2 + (8064 + x1), None, eviction_policy='evict_last')
tmp2 = libdevice.sqrt(tmp1)
tmp3 = 1e-12
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = tmp0 / tmp4
tmp7 = tmp5 - tmp6
tmp9 = tmp5 - tmp8
tmp11 = tmp5 - tmp10
tmp13 = tmp5 - tmp12
tmp15 = tmp5 - tmp14
tmp17 = tmp5 - tmp16
tmp19 = tmp5 - tmp18
tmp21 = tmp5 - tmp20
tmp23 = tmp5 - tmp22
tmp25 = tmp5 - tmp24
tmp27 = tmp5 - tmp26
tmp29 = tmp5 - tmp28
tmp31 = tmp5 - tmp30
tmp33 = tmp5 - tmp32
tmp35 = tmp5 - tmp34
tmp37 = tmp5 - tmp36
tmp39 = tmp5 - tmp38
tmp41 = tmp5 - tmp40
tmp43 = tmp5 - tmp42
tmp45 = tmp5 - tmp44
tmp47 = tmp5 - tmp46
tmp49 = tmp5 - tmp48
tmp51 = tmp5 - tmp50
tmp53 = tmp5 - tmp52
tmp55 = tmp5 - tmp54
tmp57 = tmp5 - tmp56
tmp59 = tmp5 - tmp58
tmp61 = tmp5 - tmp60
tmp63 = tmp5 - tmp62
tmp65 = tmp5 - tmp64
tmp67 = tmp5 - tmp66
tmp69 = tmp5 - tmp68
tmp71 = tmp5 - tmp70
tmp73 = tmp5 - tmp72
tmp75 = tmp5 - tmp74
tmp77 = tmp5 - tmp76
tmp79 = tmp5 - tmp78
tmp81 = tmp5 - tmp80
tmp83 = tmp5 - tmp82
tmp85 = tmp5 - tmp84
tmp87 = tmp5 - tmp86
tmp89 = tmp5 - tmp88
tmp91 = tmp5 - tmp90
tmp93 = tmp5 - tmp92
tmp95 = tmp5 - tmp94
tmp97 = tmp5 - tmp96
tmp99 = tmp5 - tmp98
tmp101 = tmp5 - tmp100
tmp103 = tmp5 - tmp102
tmp105 = tmp5 - tmp104
tmp107 = tmp5 - tmp106
tmp109 = tmp5 - tmp108
tmp111 = tmp5 - tmp110
tmp113 = tmp5 - tmp112
tmp115 = tmp5 - tmp114
tmp117 = tmp5 - tmp116
tmp119 = tmp5 - tmp118
tmp121 = tmp5 - tmp120
tmp123 = tmp5 - tmp122
tmp125 = tmp5 - tmp124
tmp127 = tmp5 - tmp126
tmp129 = tmp5 - tmp128
tmp131 = tmp5 - tmp130
tl.store(out_ptr0 + x3, tmp5, None)
tl.store(out_ptr1 + x3, tmp7, None)
tl.store(out_ptr2 + x3, tmp9, None)
tl.store(out_ptr3 + x3, tmp11, None)
tl.store(out_ptr4 + x3, tmp13, None)
tl.store(out_ptr5 + x3, tmp15, None)
tl.store(out_ptr6 + x3, tmp17, None)
tl.store(out_ptr7 + x3, tmp19, None)
tl.store(out_ptr8 + x3, tmp21, None)
tl.store(out_ptr9 + x3, tmp23, None)
tl.store(out_ptr10 + x3, tmp25, None)
tl.store(out_ptr11 + x3, tmp27, None)
tl.store(out_ptr12 + x3, tmp29, None)
tl.store(out_ptr13 + x3, tmp31, None)
tl.store(out_ptr14 + x3, tmp33, None)
tl.store(out_ptr15 + x3, tmp35, None)
tl.store(out_ptr16 + x3, tmp37, None)
tl.store(out_ptr17 + x3, tmp39, None)
tl.store(out_ptr18 + x3, tmp41, None)
tl.store(out_ptr19 + x3, tmp43, None)
tl.store(out_ptr20 + x3, tmp45, None)
tl.store(out_ptr21 + x3, tmp47, None)
tl.store(out_ptr22 + x3, tmp49, None)
tl.store(out_ptr23 + x3, tmp51, None)
tl.store(out_ptr24 + x3, tmp53, None)
tl.store(out_ptr25 + x3, tmp55, None)
tl.store(out_ptr26 + x3, tmp57, None)
tl.store(out_ptr27 + x3, tmp59, None)
tl.store(out_ptr28 + x3, tmp61, None)
tl.store(out_ptr29 + x3, tmp63, None)
tl.store(out_ptr30 + x3, tmp65, None)
tl.store(out_ptr31 + x3, tmp67, None)
tl.store(out_ptr32 + x3, tmp69, None)
tl.store(out_ptr33 + x3, tmp71, None)
tl.store(out_ptr34 + x3, tmp73, None)
tl.store(out_ptr35 + x3, tmp75, None)
tl.store(out_ptr36 + x3, tmp77, None)
tl.store(out_ptr37 + x3, tmp79, None)
tl.store(out_ptr38 + x3, tmp81, None)
tl.store(out_ptr39 + x3, tmp83, None)
tl.store(out_ptr40 + x3, tmp85, None)
tl.store(out_ptr41 + x3, tmp87, None)
tl.store(out_ptr42 + x3, tmp89, None)
tl.store(out_ptr43 + x3, tmp91, None)
tl.store(out_ptr44 + x3, tmp93, None)
tl.store(out_ptr45 + x3, tmp95, None)
tl.store(out_ptr46 + x3, tmp97, None)
tl.store(out_ptr47 + x3, tmp99, None)
tl.store(out_ptr48 + x3, tmp101, None)
tl.store(out_ptr49 + x3, tmp103, None)
tl.store(out_ptr50 + x3, tmp105, None)
tl.store(out_ptr51 + x3, tmp107, None)
tl.store(out_ptr52 + x3, tmp109, None)
tl.store(out_ptr53 + x3, tmp111, None)
tl.store(out_ptr54 + x3, tmp113, None)
tl.store(out_ptr55 + x3, tmp115, None)
tl.store(out_ptr56 + x3, tmp117, None)
tl.store(out_ptr57 + x3, tmp119, None)
tl.store(out_ptr58 + x3, tmp121, None)
tl.store(out_ptr59 + x3, tmp123, None)
tl.store(out_ptr60 + x3, tmp125, None)
tl.store(out_ptr61 + x3, tmp127, None)
tl.store(out_ptr62 + x3, tmp129, None)
tl.store(out_ptr63 + x3, tmp131, None)
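# Softmax statistics over the 64 cluster scores at each spatial location:
# out_ptr0 holds the per-location maximum, out_ptr1 the sum of
# exp(score - max); the normalised soft assignments are reconstructed on the
# fly inside the reduction kernels that follow.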
@triton.jit
def triton_per_fused__softmax_2(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 4096
x1 = xindex // 4096
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4096 * r2 + 262144 * x1), None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = triton_helpers.max2(tmp1, 1)[:, None]
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tl.store(out_ptr0 + x3, tmp3, None)
tl.store(out_ptr1 + x3, tmp8, None)
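# Reduction over the 4096 spatial positions: for each (batch, channel) pair it
# appears to accumulate softmax-weighted residuals for clusters 0-28
# (29 outputs); the cluster-0 residual is computed inline from the centroid
# row loaded via in_ptr1.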
@triton.jit
def triton_red_fused_mul_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10,
in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17,
in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24,
in_ptr25, in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, in_ptr31,
in_ptr32, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5,
out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12,
out_ptr13, out_ptr14, out_ptr15, out_ptr16, out_ptr17, out_ptr18,
out_ptr19, out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24,
out_ptr25, out_ptr26, out_ptr27, out_ptr28, xnumel, rnumel, XBLOCK: tl.
constexpr, RBLOCK: tl.constexpr):
xnumel = 512
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x3 = xindex
x0 = xindex % 128
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
x1 = xindex // 128
_tmp11 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp20 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp29 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp38 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp47 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp56 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp65 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp74 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp83 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp92 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp101 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp110 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp119 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp128 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp137 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp146 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp155 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp164 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp173 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp182 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp191 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp200 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp209 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp218 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp227 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp236 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp245 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp254 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp263 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp3 = tl.load(in_ptr2 + (r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp4 = tl.load(in_ptr3 + (r2 + 4096 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp7 = tl.load(in_ptr4 + (r2 + 4096 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp13 = tl.load(in_ptr5 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp14 = tl.load(in_ptr2 + (4096 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp22 = tl.load(in_ptr6 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp23 = tl.load(in_ptr2 + (8192 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp31 = tl.load(in_ptr7 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp32 = tl.load(in_ptr2 + (12288 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp40 = tl.load(in_ptr8 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp41 = tl.load(in_ptr2 + (16384 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp49 = tl.load(in_ptr9 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp50 = tl.load(in_ptr2 + (20480 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp58 = tl.load(in_ptr10 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp59 = tl.load(in_ptr2 + (24576 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp67 = tl.load(in_ptr11 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp68 = tl.load(in_ptr2 + (28672 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp76 = tl.load(in_ptr12 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp77 = tl.load(in_ptr2 + (32768 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp85 = tl.load(in_ptr13 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp86 = tl.load(in_ptr2 + (36864 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp94 = tl.load(in_ptr14 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp95 = tl.load(in_ptr2 + (40960 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp103 = tl.load(in_ptr15 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp104 = tl.load(in_ptr2 + (45056 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp112 = tl.load(in_ptr16 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp113 = tl.load(in_ptr2 + (49152 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp121 = tl.load(in_ptr17 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp122 = tl.load(in_ptr2 + (53248 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp130 = tl.load(in_ptr18 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp131 = tl.load(in_ptr2 + (57344 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp139 = tl.load(in_ptr19 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp140 = tl.load(in_ptr2 + (61440 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp148 = tl.load(in_ptr20 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp149 = tl.load(in_ptr2 + (65536 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp157 = tl.load(in_ptr21 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp158 = tl.load(in_ptr2 + (69632 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp166 = tl.load(in_ptr22 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp167 = tl.load(in_ptr2 + (73728 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp175 = tl.load(in_ptr23 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp176 = tl.load(in_ptr2 + (77824 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp184 = tl.load(in_ptr24 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp185 = tl.load(in_ptr2 + (81920 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp193 = tl.load(in_ptr25 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp194 = tl.load(in_ptr2 + (86016 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp202 = tl.load(in_ptr26 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp203 = tl.load(in_ptr2 + (90112 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp211 = tl.load(in_ptr27 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp212 = tl.load(in_ptr2 + (94208 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp220 = tl.load(in_ptr28 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp221 = tl.load(in_ptr2 + (98304 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp229 = tl.load(in_ptr29 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp230 = tl.load(in_ptr2 + (102400 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp238 = tl.load(in_ptr30 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp239 = tl.load(in_ptr2 + (106496 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp247 = tl.load(in_ptr31 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp248 = tl.load(in_ptr2 + (110592 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp256 = tl.load(in_ptr32 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp257 = tl.load(in_ptr2 + (114688 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp2 = tmp0 - tmp1
tmp5 = tmp3 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp8 = tmp6 / tmp7
tmp9 = tmp2 * tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = _tmp11 + tmp10
_tmp11 = tl.where(rmask & xmask, tmp12, _tmp11)
tmp15 = tmp14 - tmp4
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp16 / tmp7
tmp18 = tmp13 * tmp17
tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
tmp21 = _tmp20 + tmp19
_tmp20 = tl.where(rmask & xmask, tmp21, _tmp20)
tmp24 = tmp23 - tmp4
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp25 / tmp7
tmp27 = tmp22 * tmp26
tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK])
tmp30 = _tmp29 + tmp28
_tmp29 = tl.where(rmask & xmask, tmp30, _tmp29)
tmp33 = tmp32 - tmp4
tmp34 = tl_math.exp(tmp33)
tmp35 = tmp34 / tmp7
tmp36 = tmp31 * tmp35
tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK])
tmp39 = _tmp38 + tmp37
_tmp38 = tl.where(rmask & xmask, tmp39, _tmp38)
tmp42 = tmp41 - tmp4
tmp43 = tl_math.exp(tmp42)
tmp44 = tmp43 / tmp7
tmp45 = tmp40 * tmp44
tmp46 = tl.broadcast_to(tmp45, [XBLOCK, RBLOCK])
tmp48 = _tmp47 + tmp46
_tmp47 = tl.where(rmask & xmask, tmp48, _tmp47)
tmp51 = tmp50 - tmp4
tmp52 = tl_math.exp(tmp51)
tmp53 = tmp52 / tmp7
tmp54 = tmp49 * tmp53
tmp55 = tl.broadcast_to(tmp54, [XBLOCK, RBLOCK])
tmp57 = _tmp56 + tmp55
_tmp56 = tl.where(rmask & xmask, tmp57, _tmp56)
tmp60 = tmp59 - tmp4
tmp61 = tl_math.exp(tmp60)
tmp62 = tmp61 / tmp7
tmp63 = tmp58 * tmp62
tmp64 = tl.broadcast_to(tmp63, [XBLOCK, RBLOCK])
tmp66 = _tmp65 + tmp64
_tmp65 = tl.where(rmask & xmask, tmp66, _tmp65)
tmp69 = tmp68 - tmp4
tmp70 = tl_math.exp(tmp69)
tmp71 = tmp70 / tmp7
tmp72 = tmp67 * tmp71
tmp73 = tl.broadcast_to(tmp72, [XBLOCK, RBLOCK])
tmp75 = _tmp74 + tmp73
_tmp74 = tl.where(rmask & xmask, tmp75, _tmp74)
tmp78 = tmp77 - tmp4
tmp79 = tl_math.exp(tmp78)
tmp80 = tmp79 / tmp7
tmp81 = tmp76 * tmp80
tmp82 = tl.broadcast_to(tmp81, [XBLOCK, RBLOCK])
tmp84 = _tmp83 + tmp82
_tmp83 = tl.where(rmask & xmask, tmp84, _tmp83)
tmp87 = tmp86 - tmp4
tmp88 = tl_math.exp(tmp87)
tmp89 = tmp88 / tmp7
tmp90 = tmp85 * tmp89
tmp91 = tl.broadcast_to(tmp90, [XBLOCK, RBLOCK])
tmp93 = _tmp92 + tmp91
_tmp92 = tl.where(rmask & xmask, tmp93, _tmp92)
tmp96 = tmp95 - tmp4
tmp97 = tl_math.exp(tmp96)
tmp98 = tmp97 / tmp7
tmp99 = tmp94 * tmp98
tmp100 = tl.broadcast_to(tmp99, [XBLOCK, RBLOCK])
tmp102 = _tmp101 + tmp100
_tmp101 = tl.where(rmask & xmask, tmp102, _tmp101)
tmp105 = tmp104 - tmp4
tmp106 = tl_math.exp(tmp105)
tmp107 = tmp106 / tmp7
tmp108 = tmp103 * tmp107
tmp109 = tl.broadcast_to(tmp108, [XBLOCK, RBLOCK])
tmp111 = _tmp110 + tmp109
_tmp110 = tl.where(rmask & xmask, tmp111, _tmp110)
tmp114 = tmp113 - tmp4
tmp115 = tl_math.exp(tmp114)
tmp116 = tmp115 / tmp7
tmp117 = tmp112 * tmp116
tmp118 = tl.broadcast_to(tmp117, [XBLOCK, RBLOCK])
tmp120 = _tmp119 + tmp118
_tmp119 = tl.where(rmask & xmask, tmp120, _tmp119)
tmp123 = tmp122 - tmp4
tmp124 = tl_math.exp(tmp123)
tmp125 = tmp124 / tmp7
tmp126 = tmp121 * tmp125
tmp127 = tl.broadcast_to(tmp126, [XBLOCK, RBLOCK])
tmp129 = _tmp128 + tmp127
_tmp128 = tl.where(rmask & xmask, tmp129, _tmp128)
tmp132 = tmp131 - tmp4
tmp133 = tl_math.exp(tmp132)
tmp134 = tmp133 / tmp7
tmp135 = tmp130 * tmp134
tmp136 = tl.broadcast_to(tmp135, [XBLOCK, RBLOCK])
tmp138 = _tmp137 + tmp136
_tmp137 = tl.where(rmask & xmask, tmp138, _tmp137)
tmp141 = tmp140 - tmp4
tmp142 = tl_math.exp(tmp141)
tmp143 = tmp142 / tmp7
tmp144 = tmp139 * tmp143
tmp145 = tl.broadcast_to(tmp144, [XBLOCK, RBLOCK])
tmp147 = _tmp146 + tmp145
_tmp146 = tl.where(rmask & xmask, tmp147, _tmp146)
tmp150 = tmp149 - tmp4
tmp151 = tl_math.exp(tmp150)
tmp152 = tmp151 / tmp7
tmp153 = tmp148 * tmp152
tmp154 = tl.broadcast_to(tmp153, [XBLOCK, RBLOCK])
tmp156 = _tmp155 + tmp154
_tmp155 = tl.where(rmask & xmask, tmp156, _tmp155)
tmp159 = tmp158 - tmp4
tmp160 = tl_math.exp(tmp159)
tmp161 = tmp160 / tmp7
tmp162 = tmp157 * tmp161
tmp163 = tl.broadcast_to(tmp162, [XBLOCK, RBLOCK])
tmp165 = _tmp164 + tmp163
_tmp164 = tl.where(rmask & xmask, tmp165, _tmp164)
tmp168 = tmp167 - tmp4
tmp169 = tl_math.exp(tmp168)
tmp170 = tmp169 / tmp7
tmp171 = tmp166 * tmp170
tmp172 = tl.broadcast_to(tmp171, [XBLOCK, RBLOCK])
tmp174 = _tmp173 + tmp172
_tmp173 = tl.where(rmask & xmask, tmp174, _tmp173)
tmp177 = tmp176 - tmp4
tmp178 = tl_math.exp(tmp177)
tmp179 = tmp178 / tmp7
tmp180 = tmp175 * tmp179
tmp181 = tl.broadcast_to(tmp180, [XBLOCK, RBLOCK])
tmp183 = _tmp182 + tmp181
_tmp182 = tl.where(rmask & xmask, tmp183, _tmp182)
tmp186 = tmp185 - tmp4
tmp187 = tl_math.exp(tmp186)
tmp188 = tmp187 / tmp7
tmp189 = tmp184 * tmp188
tmp190 = tl.broadcast_to(tmp189, [XBLOCK, RBLOCK])
tmp192 = _tmp191 + tmp190
_tmp191 = tl.where(rmask & xmask, tmp192, _tmp191)
tmp195 = tmp194 - tmp4
tmp196 = tl_math.exp(tmp195)
tmp197 = tmp196 / tmp7
tmp198 = tmp193 * tmp197
tmp199 = tl.broadcast_to(tmp198, [XBLOCK, RBLOCK])
tmp201 = _tmp200 + tmp199
_tmp200 = tl.where(rmask & xmask, tmp201, _tmp200)
tmp204 = tmp203 - tmp4
tmp205 = tl_math.exp(tmp204)
tmp206 = tmp205 / tmp7
tmp207 = tmp202 * tmp206
tmp208 = tl.broadcast_to(tmp207, [XBLOCK, RBLOCK])
tmp210 = _tmp209 + tmp208
_tmp209 = tl.where(rmask & xmask, tmp210, _tmp209)
tmp213 = tmp212 - tmp4
tmp214 = tl_math.exp(tmp213)
tmp215 = tmp214 / tmp7
tmp216 = tmp211 * tmp215
tmp217 = tl.broadcast_to(tmp216, [XBLOCK, RBLOCK])
tmp219 = _tmp218 + tmp217
_tmp218 = tl.where(rmask & xmask, tmp219, _tmp218)
tmp222 = tmp221 - tmp4
tmp223 = tl_math.exp(tmp222)
tmp224 = tmp223 / tmp7
tmp225 = tmp220 * tmp224
tmp226 = tl.broadcast_to(tmp225, [XBLOCK, RBLOCK])
tmp228 = _tmp227 + tmp226
_tmp227 = tl.where(rmask & xmask, tmp228, _tmp227)
tmp231 = tmp230 - tmp4
tmp232 = tl_math.exp(tmp231)
tmp233 = tmp232 / tmp7
tmp234 = tmp229 * tmp233
tmp235 = tl.broadcast_to(tmp234, [XBLOCK, RBLOCK])
tmp237 = _tmp236 + tmp235
_tmp236 = tl.where(rmask & xmask, tmp237, _tmp236)
tmp240 = tmp239 - tmp4
tmp241 = tl_math.exp(tmp240)
tmp242 = tmp241 / tmp7
tmp243 = tmp238 * tmp242
tmp244 = tl.broadcast_to(tmp243, [XBLOCK, RBLOCK])
tmp246 = _tmp245 + tmp244
_tmp245 = tl.where(rmask & xmask, tmp246, _tmp245)
tmp249 = tmp248 - tmp4
tmp250 = tl_math.exp(tmp249)
tmp251 = tmp250 / tmp7
tmp252 = tmp247 * tmp251
tmp253 = tl.broadcast_to(tmp252, [XBLOCK, RBLOCK])
tmp255 = _tmp254 + tmp253
_tmp254 = tl.where(rmask & xmask, tmp255, _tmp254)
tmp258 = tmp257 - tmp4
tmp259 = tl_math.exp(tmp258)
tmp260 = tmp259 / tmp7
tmp261 = tmp256 * tmp260
tmp262 = tl.broadcast_to(tmp261, [XBLOCK, RBLOCK])
tmp264 = _tmp263 + tmp262
_tmp263 = tl.where(rmask & xmask, tmp264, _tmp263)
tmp11 = tl.sum(_tmp11, 1)[:, None]
tl.store(out_ptr0 + x3, tmp11, xmask)
tmp20 = tl.sum(_tmp20, 1)[:, None]
tl.store(out_ptr1 + x3, tmp20, xmask)
tmp29 = tl.sum(_tmp29, 1)[:, None]
tl.store(out_ptr2 + x3, tmp29, xmask)
tmp38 = tl.sum(_tmp38, 1)[:, None]
tl.store(out_ptr3 + x3, tmp38, xmask)
tmp47 = tl.sum(_tmp47, 1)[:, None]
tl.store(out_ptr4 + x3, tmp47, xmask)
tmp56 = tl.sum(_tmp56, 1)[:, None]
tl.store(out_ptr5 + x3, tmp56, xmask)
tmp65 = tl.sum(_tmp65, 1)[:, None]
tl.store(out_ptr6 + x3, tmp65, xmask)
tmp74 = tl.sum(_tmp74, 1)[:, None]
tl.store(out_ptr7 + x3, tmp74, xmask)
tmp83 = tl.sum(_tmp83, 1)[:, None]
tl.store(out_ptr8 + x3, tmp83, xmask)
tmp92 = tl.sum(_tmp92, 1)[:, None]
tl.store(out_ptr9 + x3, tmp92, xmask)
tmp101 = tl.sum(_tmp101, 1)[:, None]
tl.store(out_ptr10 + x3, tmp101, xmask)
tmp110 = tl.sum(_tmp110, 1)[:, None]
tl.store(out_ptr11 + x3, tmp110, xmask)
tmp119 = tl.sum(_tmp119, 1)[:, None]
tl.store(out_ptr12 + x3, tmp119, xmask)
tmp128 = tl.sum(_tmp128, 1)[:, None]
tl.store(out_ptr13 + x3, tmp128, xmask)
tmp137 = tl.sum(_tmp137, 1)[:, None]
tl.store(out_ptr14 + x3, tmp137, xmask)
tmp146 = tl.sum(_tmp146, 1)[:, None]
tl.store(out_ptr15 + x3, tmp146, xmask)
tmp155 = tl.sum(_tmp155, 1)[:, None]
tl.store(out_ptr16 + x3, tmp155, xmask)
tmp164 = tl.sum(_tmp164, 1)[:, None]
tl.store(out_ptr17 + x3, tmp164, xmask)
tmp173 = tl.sum(_tmp173, 1)[:, None]
tl.store(out_ptr18 + x3, tmp173, xmask)
tmp182 = tl.sum(_tmp182, 1)[:, None]
tl.store(out_ptr19 + x3, tmp182, xmask)
tmp191 = tl.sum(_tmp191, 1)[:, None]
tl.store(out_ptr20 + x3, tmp191, xmask)
tmp200 = tl.sum(_tmp200, 1)[:, None]
tl.store(out_ptr21 + x3, tmp200, xmask)
tmp209 = tl.sum(_tmp209, 1)[:, None]
tl.store(out_ptr22 + x3, tmp209, xmask)
tmp218 = tl.sum(_tmp218, 1)[:, None]
tl.store(out_ptr23 + x3, tmp218, xmask)
tmp227 = tl.sum(_tmp227, 1)[:, None]
tl.store(out_ptr24 + x3, tmp227, xmask)
tmp236 = tl.sum(_tmp236, 1)[:, None]
tl.store(out_ptr25 + x3, tmp236, xmask)
tmp245 = tl.sum(_tmp245, 1)[:, None]
tl.store(out_ptr26 + x3, tmp245, xmask)
tmp254 = tl.sum(_tmp254, 1)[:, None]
tl.store(out_ptr27 + x3, tmp254, xmask)
tmp263 = tl.sum(_tmp263, 1)[:, None]
tl.store(out_ptr28 + x3, tmp263, xmask)
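# Same softmax-weighted residual reduction as above, apparently covering
# clusters 29-56 (28 outputs), reading the soft-assignment scores at the
# corresponding offsets of in_ptr1.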
@triton.jit
def triton_red_fused_mul_sum_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11,
in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18,
in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25,
in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, out_ptr0, out_ptr1,
out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8,
out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13, out_ptr14,
out_ptr15, out_ptr16, out_ptr17, out_ptr18, out_ptr19, out_ptr20,
out_ptr21, out_ptr22, out_ptr23, out_ptr24, out_ptr25, out_ptr26,
out_ptr27, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 512
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x3 = xindex
x1 = xindex // 128
_tmp9 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp18 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp27 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp36 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp45 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp54 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp63 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp72 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp81 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp90 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp99 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp108 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp117 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp126 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp135 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp144 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp153 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp162 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp171 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp180 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp189 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp198 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp207 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp216 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp225 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp234 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp243 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp252 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp1 = tl.load(in_ptr1 + (118784 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp2 = tl.load(in_ptr2 + (r2 + 4096 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp5 = tl.load(in_ptr3 + (r2 + 4096 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tl.load(in_ptr4 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp12 = tl.load(in_ptr1 + (122880 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.load(in_ptr5 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp21 = tl.load(in_ptr1 + (126976 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp29 = tl.load(in_ptr6 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp30 = tl.load(in_ptr1 + (131072 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp38 = tl.load(in_ptr7 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp39 = tl.load(in_ptr1 + (135168 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp47 = tl.load(in_ptr8 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp48 = tl.load(in_ptr1 + (139264 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp56 = tl.load(in_ptr9 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp57 = tl.load(in_ptr1 + (143360 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp65 = tl.load(in_ptr10 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp66 = tl.load(in_ptr1 + (147456 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp74 = tl.load(in_ptr11 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp75 = tl.load(in_ptr1 + (151552 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp83 = tl.load(in_ptr12 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp84 = tl.load(in_ptr1 + (155648 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp92 = tl.load(in_ptr13 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp93 = tl.load(in_ptr1 + (159744 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp101 = tl.load(in_ptr14 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp102 = tl.load(in_ptr1 + (163840 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp110 = tl.load(in_ptr15 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp111 = tl.load(in_ptr1 + (167936 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp119 = tl.load(in_ptr16 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp120 = tl.load(in_ptr1 + (172032 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp128 = tl.load(in_ptr17 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp129 = tl.load(in_ptr1 + (176128 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp137 = tl.load(in_ptr18 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp138 = tl.load(in_ptr1 + (180224 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp146 = tl.load(in_ptr19 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp147 = tl.load(in_ptr1 + (184320 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp155 = tl.load(in_ptr20 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp156 = tl.load(in_ptr1 + (188416 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp164 = tl.load(in_ptr21 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp165 = tl.load(in_ptr1 + (192512 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp173 = tl.load(in_ptr22 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp174 = tl.load(in_ptr1 + (196608 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp182 = tl.load(in_ptr23 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp183 = tl.load(in_ptr1 + (200704 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp191 = tl.load(in_ptr24 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp192 = tl.load(in_ptr1 + (204800 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp200 = tl.load(in_ptr25 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp201 = tl.load(in_ptr1 + (208896 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp209 = tl.load(in_ptr26 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp210 = tl.load(in_ptr1 + (212992 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp218 = tl.load(in_ptr27 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp219 = tl.load(in_ptr1 + (217088 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp227 = tl.load(in_ptr28 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp228 = tl.load(in_ptr1 + (221184 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp236 = tl.load(in_ptr29 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp237 = tl.load(in_ptr1 + (225280 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp245 = tl.load(in_ptr30 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp246 = tl.load(in_ptr1 + (229376 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp3 = tmp1 - tmp2
tmp4 = tl_math.exp(tmp3)
tmp6 = tmp4 / tmp5
tmp7 = tmp0 * tmp6
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = _tmp9 + tmp8
_tmp9 = tl.where(rmask & xmask, tmp10, _tmp9)
tmp13 = tmp12 - tmp2
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp16 = tmp11 * tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = _tmp18 + tmp17
_tmp18 = tl.where(rmask & xmask, tmp19, _tmp18)
tmp22 = tmp21 - tmp2
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 / tmp5
tmp25 = tmp20 * tmp24
tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK])
tmp28 = _tmp27 + tmp26
_tmp27 = tl.where(rmask & xmask, tmp28, _tmp27)
tmp31 = tmp30 - tmp2
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp32 / tmp5
tmp34 = tmp29 * tmp33
tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK])
tmp37 = _tmp36 + tmp35
_tmp36 = tl.where(rmask & xmask, tmp37, _tmp36)
tmp40 = tmp39 - tmp2
tmp41 = tl_math.exp(tmp40)
tmp42 = tmp41 / tmp5
tmp43 = tmp38 * tmp42
tmp44 = tl.broadcast_to(tmp43, [XBLOCK, RBLOCK])
tmp46 = _tmp45 + tmp44
_tmp45 = tl.where(rmask & xmask, tmp46, _tmp45)
tmp49 = tmp48 - tmp2
tmp50 = tl_math.exp(tmp49)
tmp51 = tmp50 / tmp5
tmp52 = tmp47 * tmp51
tmp53 = tl.broadcast_to(tmp52, [XBLOCK, RBLOCK])
tmp55 = _tmp54 + tmp53
_tmp54 = tl.where(rmask & xmask, tmp55, _tmp54)
tmp58 = tmp57 - tmp2
tmp59 = tl_math.exp(tmp58)
tmp60 = tmp59 / tmp5
tmp61 = tmp56 * tmp60
tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK])
tmp64 = _tmp63 + tmp62
_tmp63 = tl.where(rmask & xmask, tmp64, _tmp63)
tmp67 = tmp66 - tmp2
tmp68 = tl_math.exp(tmp67)
tmp69 = tmp68 / tmp5
tmp70 = tmp65 * tmp69
tmp71 = tl.broadcast_to(tmp70, [XBLOCK, RBLOCK])
tmp73 = _tmp72 + tmp71
_tmp72 = tl.where(rmask & xmask, tmp73, _tmp72)
tmp76 = tmp75 - tmp2
tmp77 = tl_math.exp(tmp76)
tmp78 = tmp77 / tmp5
tmp79 = tmp74 * tmp78
tmp80 = tl.broadcast_to(tmp79, [XBLOCK, RBLOCK])
tmp82 = _tmp81 + tmp80
_tmp81 = tl.where(rmask & xmask, tmp82, _tmp81)
tmp85 = tmp84 - tmp2
tmp86 = tl_math.exp(tmp85)
tmp87 = tmp86 / tmp5
tmp88 = tmp83 * tmp87
tmp89 = tl.broadcast_to(tmp88, [XBLOCK, RBLOCK])
tmp91 = _tmp90 + tmp89
_tmp90 = tl.where(rmask & xmask, tmp91, _tmp90)
tmp94 = tmp93 - tmp2
tmp95 = tl_math.exp(tmp94)
tmp96 = tmp95 / tmp5
tmp97 = tmp92 * tmp96
tmp98 = tl.broadcast_to(tmp97, [XBLOCK, RBLOCK])
tmp100 = _tmp99 + tmp98
_tmp99 = tl.where(rmask & xmask, tmp100, _tmp99)
tmp103 = tmp102 - tmp2
tmp104 = tl_math.exp(tmp103)
tmp105 = tmp104 / tmp5
tmp106 = tmp101 * tmp105
tmp107 = tl.broadcast_to(tmp106, [XBLOCK, RBLOCK])
tmp109 = _tmp108 + tmp107
_tmp108 = tl.where(rmask & xmask, tmp109, _tmp108)
tmp112 = tmp111 - tmp2
tmp113 = tl_math.exp(tmp112)
tmp114 = tmp113 / tmp5
tmp115 = tmp110 * tmp114
tmp116 = tl.broadcast_to(tmp115, [XBLOCK, RBLOCK])
tmp118 = _tmp117 + tmp116
_tmp117 = tl.where(rmask & xmask, tmp118, _tmp117)
tmp121 = tmp120 - tmp2
tmp122 = tl_math.exp(tmp121)
tmp123 = tmp122 / tmp5
tmp124 = tmp119 * tmp123
tmp125 = tl.broadcast_to(tmp124, [XBLOCK, RBLOCK])
tmp127 = _tmp126 + tmp125
_tmp126 = tl.where(rmask & xmask, tmp127, _tmp126)
tmp130 = tmp129 - tmp2
tmp131 = tl_math.exp(tmp130)
tmp132 = tmp131 / tmp5
tmp133 = tmp128 * tmp132
tmp134 = tl.broadcast_to(tmp133, [XBLOCK, RBLOCK])
tmp136 = _tmp135 + tmp134
_tmp135 = tl.where(rmask & xmask, tmp136, _tmp135)
tmp139 = tmp138 - tmp2
tmp140 = tl_math.exp(tmp139)
tmp141 = tmp140 / tmp5
tmp142 = tmp137 * tmp141
tmp143 = tl.broadcast_to(tmp142, [XBLOCK, RBLOCK])
tmp145 = _tmp144 + tmp143
_tmp144 = tl.where(rmask & xmask, tmp145, _tmp144)
tmp148 = tmp147 - tmp2
tmp149 = tl_math.exp(tmp148)
tmp150 = tmp149 / tmp5
tmp151 = tmp146 * tmp150
tmp152 = tl.broadcast_to(tmp151, [XBLOCK, RBLOCK])
tmp154 = _tmp153 + tmp152
_tmp153 = tl.where(rmask & xmask, tmp154, _tmp153)
tmp157 = tmp156 - tmp2
tmp158 = tl_math.exp(tmp157)
tmp159 = tmp158 / tmp5
tmp160 = tmp155 * tmp159
tmp161 = tl.broadcast_to(tmp160, [XBLOCK, RBLOCK])
tmp163 = _tmp162 + tmp161
_tmp162 = tl.where(rmask & xmask, tmp163, _tmp162)
tmp166 = tmp165 - tmp2
tmp167 = tl_math.exp(tmp166)
tmp168 = tmp167 / tmp5
tmp169 = tmp164 * tmp168
tmp170 = tl.broadcast_to(tmp169, [XBLOCK, RBLOCK])
tmp172 = _tmp171 + tmp170
_tmp171 = tl.where(rmask & xmask, tmp172, _tmp171)
tmp175 = tmp174 - tmp2
tmp176 = tl_math.exp(tmp175)
tmp177 = tmp176 / tmp5
tmp178 = tmp173 * tmp177
tmp179 = tl.broadcast_to(tmp178, [XBLOCK, RBLOCK])
tmp181 = _tmp180 + tmp179
_tmp180 = tl.where(rmask & xmask, tmp181, _tmp180)
tmp184 = tmp183 - tmp2
tmp185 = tl_math.exp(tmp184)
tmp186 = tmp185 / tmp5
tmp187 = tmp182 * tmp186
tmp188 = tl.broadcast_to(tmp187, [XBLOCK, RBLOCK])
tmp190 = _tmp189 + tmp188
_tmp189 = tl.where(rmask & xmask, tmp190, _tmp189)
tmp193 = tmp192 - tmp2
tmp194 = tl_math.exp(tmp193)
tmp195 = tmp194 / tmp5
tmp196 = tmp191 * tmp195
tmp197 = tl.broadcast_to(tmp196, [XBLOCK, RBLOCK])
tmp199 = _tmp198 + tmp197
_tmp198 = tl.where(rmask & xmask, tmp199, _tmp198)
tmp202 = tmp201 - tmp2
tmp203 = tl_math.exp(tmp202)
tmp204 = tmp203 / tmp5
tmp205 = tmp200 * tmp204
tmp206 = tl.broadcast_to(tmp205, [XBLOCK, RBLOCK])
tmp208 = _tmp207 + tmp206
_tmp207 = tl.where(rmask & xmask, tmp208, _tmp207)
tmp211 = tmp210 - tmp2
tmp212 = tl_math.exp(tmp211)
tmp213 = tmp212 / tmp5
tmp214 = tmp209 * tmp213
tmp215 = tl.broadcast_to(tmp214, [XBLOCK, RBLOCK])
tmp217 = _tmp216 + tmp215
_tmp216 = tl.where(rmask & xmask, tmp217, _tmp216)
tmp220 = tmp219 - tmp2
tmp221 = tl_math.exp(tmp220)
tmp222 = tmp221 / tmp5
tmp223 = tmp218 * tmp222
tmp224 = tl.broadcast_to(tmp223, [XBLOCK, RBLOCK])
tmp226 = _tmp225 + tmp224
_tmp225 = tl.where(rmask & xmask, tmp226, _tmp225)
tmp229 = tmp228 - tmp2
tmp230 = tl_math.exp(tmp229)
tmp231 = tmp230 / tmp5
tmp232 = tmp227 * tmp231
tmp233 = tl.broadcast_to(tmp232, [XBLOCK, RBLOCK])
tmp235 = _tmp234 + tmp233
_tmp234 = tl.where(rmask & xmask, tmp235, _tmp234)
tmp238 = tmp237 - tmp2
tmp239 = tl_math.exp(tmp238)
tmp240 = tmp239 / tmp5
tmp241 = tmp236 * tmp240
tmp242 = tl.broadcast_to(tmp241, [XBLOCK, RBLOCK])
tmp244 = _tmp243 + tmp242
_tmp243 = tl.where(rmask & xmask, tmp244, _tmp243)
tmp247 = tmp246 - tmp2
tmp248 = tl_math.exp(tmp247)
tmp249 = tmp248 / tmp5
tmp250 = tmp245 * tmp249
tmp251 = tl.broadcast_to(tmp250, [XBLOCK, RBLOCK])
tmp253 = _tmp252 + tmp251
_tmp252 = tl.where(rmask & xmask, tmp253, _tmp252)
tmp9 = tl.sum(_tmp9, 1)[:, None]
tl.store(out_ptr0 + x3, tmp9, xmask)
tmp18 = tl.sum(_tmp18, 1)[:, None]
tl.store(out_ptr1 + x3, tmp18, xmask)
tmp27 = tl.sum(_tmp27, 1)[:, None]
tl.store(out_ptr2 + x3, tmp27, xmask)
tmp36 = tl.sum(_tmp36, 1)[:, None]
tl.store(out_ptr3 + x3, tmp36, xmask)
tmp45 = tl.sum(_tmp45, 1)[:, None]
tl.store(out_ptr4 + x3, tmp45, xmask)
tmp54 = tl.sum(_tmp54, 1)[:, None]
tl.store(out_ptr5 + x3, tmp54, xmask)
tmp63 = tl.sum(_tmp63, 1)[:, None]
tl.store(out_ptr6 + x3, tmp63, xmask)
tmp72 = tl.sum(_tmp72, 1)[:, None]
tl.store(out_ptr7 + x3, tmp72, xmask)
tmp81 = tl.sum(_tmp81, 1)[:, None]
tl.store(out_ptr8 + x3, tmp81, xmask)
tmp90 = tl.sum(_tmp90, 1)[:, None]
tl.store(out_ptr9 + x3, tmp90, xmask)
tmp99 = tl.sum(_tmp99, 1)[:, None]
tl.store(out_ptr10 + x3, tmp99, xmask)
tmp108 = tl.sum(_tmp108, 1)[:, None]
tl.store(out_ptr11 + x3, tmp108, xmask)
tmp117 = tl.sum(_tmp117, 1)[:, None]
tl.store(out_ptr12 + x3, tmp117, xmask)
tmp126 = tl.sum(_tmp126, 1)[:, None]
tl.store(out_ptr13 + x3, tmp126, xmask)
tmp135 = tl.sum(_tmp135, 1)[:, None]
tl.store(out_ptr14 + x3, tmp135, xmask)
tmp144 = tl.sum(_tmp144, 1)[:, None]
tl.store(out_ptr15 + x3, tmp144, xmask)
tmp153 = tl.sum(_tmp153, 1)[:, None]
tl.store(out_ptr16 + x3, tmp153, xmask)
tmp162 = tl.sum(_tmp162, 1)[:, None]
tl.store(out_ptr17 + x3, tmp162, xmask)
tmp171 = tl.sum(_tmp171, 1)[:, None]
tl.store(out_ptr18 + x3, tmp171, xmask)
tmp180 = tl.sum(_tmp180, 1)[:, None]
tl.store(out_ptr19 + x3, tmp180, xmask)
tmp189 = tl.sum(_tmp189, 1)[:, None]
tl.store(out_ptr20 + x3, tmp189, xmask)
tmp198 = tl.sum(_tmp198, 1)[:, None]
tl.store(out_ptr21 + x3, tmp198, xmask)
tmp207 = tl.sum(_tmp207, 1)[:, None]
tl.store(out_ptr22 + x3, tmp207, xmask)
tmp216 = tl.sum(_tmp216, 1)[:, None]
tl.store(out_ptr23 + x3, tmp216, xmask)
tmp225 = tl.sum(_tmp225, 1)[:, None]
tl.store(out_ptr24 + x3, tmp225, xmask)
tmp234 = tl.sum(_tmp234, 1)[:, None]
tl.store(out_ptr25 + x3, tmp234, xmask)
tmp243 = tl.sum(_tmp243, 1)[:, None]
tl.store(out_ptr26 + x3, tmp243, xmask)
tmp252 = tl.sum(_tmp252, 1)[:, None]
tl.store(out_ptr27 + x3, tmp252, xmask)
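# Final slice of the residual reduction, apparently covering the remaining
# clusters 57-63 (7 outputs).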
@triton.jit
def triton_red_fused_mul_sum_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, out_ptr0, out_ptr1,
out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, xnumel, rnumel,
XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 512
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x3 = xindex
x1 = xindex // 128
_tmp9 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp18 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp27 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp36 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp45 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp54 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
_tmp63 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp1 = tl.load(in_ptr1 + (233472 + r2 + 262144 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp2 = tl.load(in_ptr2 + (r2 + 4096 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp5 = tl.load(in_ptr3 + (r2 + 4096 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tl.load(in_ptr4 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp12 = tl.load(in_ptr1 + (237568 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.load(in_ptr5 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp21 = tl.load(in_ptr1 + (241664 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp29 = tl.load(in_ptr6 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp30 = tl.load(in_ptr1 + (245760 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp38 = tl.load(in_ptr7 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp39 = tl.load(in_ptr1 + (249856 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp47 = tl.load(in_ptr8 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp48 = tl.load(in_ptr1 + (253952 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp56 = tl.load(in_ptr9 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp57 = tl.load(in_ptr1 + (258048 + r2 + 262144 * x1), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp3 = tmp1 - tmp2
tmp4 = tl_math.exp(tmp3)
tmp6 = tmp4 / tmp5
tmp7 = tmp0 * tmp6
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = _tmp9 + tmp8
_tmp9 = tl.where(rmask & xmask, tmp10, _tmp9)
tmp13 = tmp12 - tmp2
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp16 = tmp11 * tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = _tmp18 + tmp17
_tmp18 = tl.where(rmask & xmask, tmp19, _tmp18)
tmp22 = tmp21 - tmp2
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 / tmp5
tmp25 = tmp20 * tmp24
tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK])
tmp28 = _tmp27 + tmp26
_tmp27 = tl.where(rmask & xmask, tmp28, _tmp27)
tmp31 = tmp30 - tmp2
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp32 / tmp5
tmp34 = tmp29 * tmp33
tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK])
tmp37 = _tmp36 + tmp35
_tmp36 = tl.where(rmask & xmask, tmp37, _tmp36)
tmp40 = tmp39 - tmp2
tmp41 = tl_math.exp(tmp40)
tmp42 = tmp41 / tmp5
tmp43 = tmp38 * tmp42
tmp44 = tl.broadcast_to(tmp43, [XBLOCK, RBLOCK])
tmp46 = _tmp45 + tmp44
_tmp45 = tl.where(rmask & xmask, tmp46, _tmp45)
tmp49 = tmp48 - tmp2
tmp50 = tl_math.exp(tmp49)
tmp51 = tmp50 / tmp5
tmp52 = tmp47 * tmp51
tmp53 = tl.broadcast_to(tmp52, [XBLOCK, RBLOCK])
tmp55 = _tmp54 + tmp53
_tmp54 = tl.where(rmask & xmask, tmp55, _tmp54)
tmp58 = tmp57 - tmp2
tmp59 = tl_math.exp(tmp58)
tmp60 = tmp59 / tmp5
tmp61 = tmp56 * tmp60
tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK])
tmp64 = _tmp63 + tmp62
_tmp63 = tl.where(rmask & xmask, tmp64, _tmp63)
tmp9 = tl.sum(_tmp9, 1)[:, None]
tl.store(out_ptr0 + x3, tmp9, xmask)
tmp18 = tl.sum(_tmp18, 1)[:, None]
tl.store(out_ptr1 + x3, tmp18, xmask)
tmp27 = tl.sum(_tmp27, 1)[:, None]
tl.store(out_ptr2 + x3, tmp27, xmask)
tmp36 = tl.sum(_tmp36, 1)[:, None]
tl.store(out_ptr3 + x3, tmp36, xmask)
tmp45 = tl.sum(_tmp45, 1)[:, None]
tl.store(out_ptr4 + x3, tmp45, xmask)
tmp54 = tl.sum(_tmp54, 1)[:, None]
tl.store(out_ptr5 + x3, tmp54, xmask)
tmp63 = tl.sum(_tmp63, 1)[:, None]
tl.store(out_ptr6 + x3, tmp63, xmask)
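# Gather kernel: the chain of index-range selects below appears to copy the 64
# per-cluster accumulations into the (N, num_clusters, C) VLAD tensor and, per
# its name, to compute the per-cluster vector norm used for the
# intra-normalisation F.normalize(vlad, p=2, dim=2).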
@triton.jit
def triton_per_fused_copy_linalg_vector_norm_zeros_6(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12,
in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19,
in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25, in_ptr26,
in_ptr27, in_ptr28, in_ptr29, in_ptr30, in_ptr31, in_ptr32, in_ptr33,
in_ptr34, in_ptr35, in_ptr36, in_ptr37, in_ptr38, in_ptr39, in_ptr40,
in_ptr41, in_ptr42, in_ptr43, in_ptr44, in_ptr45, in_ptr46, in_ptr47,
in_ptr48, in_ptr49, in_ptr50, in_ptr51, in_ptr52, in_ptr53, in_ptr54,
in_ptr55, in_ptr56, in_ptr57, in_ptr58, in_ptr59, in_ptr60, in_ptr61,
in_ptr62, in_ptr63, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 256
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex % 64
r2 = rindex
x1 = xindex // 64
x3 = xindex
tmp0 = x0
tmp1 = tl.full([1, 1], 4, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 5, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (r2 + 128 * x1), tmp5 & xmask, eviction_policy
='evict_last', other=0.0)
tmp7 = tl.full([1, 1], 3, tl.int64)
tmp8 = tmp0 >= tmp7
tmp9 = tmp0 < tmp1
tmp10 = tmp8 & tmp9
tmp11 = tl.load(in_ptr1 + (r2 + 128 * x1), tmp10 & xmask,
eviction_policy='evict_last', other=0.0)
tmp12 = tl.full([1, 1], 2, tl.int64)
tmp13 = tmp0 >= tmp12
tmp14 = tmp0 < tmp7
tmp15 = tmp13 & tmp14
tmp16 = tl.load(in_ptr2 + (r2 + 128 * x1), tmp15 & xmask,
eviction_policy='evict_last', other=0.0)
tmp17 = tl.full([1, 1], 1, tl.int64)
tmp18 = tmp0 >= tmp17
tmp19 = tmp0 < tmp12
tmp20 = tmp18 & tmp19
tmp21 = tl.load(in_ptr3 + (r2 + 128 * x1), tmp20 & xmask,
eviction_policy='evict_last', other=0.0)
tmp22 = tmp0 < tmp17
tmp23 = tl.load(in_ptr4 + (r2 + 128 * x1), tmp22 & xmask,
eviction_policy='evict_last', other=0.0)
tmp24 = 0.0
tmp25 = tl.where(tmp22, tmp23, tmp24)
tmp26 = tl.where(tmp20, tmp21, tmp25)
tmp27 = tl.where(tmp15, tmp16, tmp26)
tmp28 = tl.where(tmp10, tmp11, tmp27)
tmp29 = tl.where(tmp5, tmp6, tmp28)
tmp30 = tl.full([1, 1], 8, tl.int64)
tmp31 = tmp0 >= tmp30
tmp32 = tl.full([1, 1], 9, tl.int64)
tmp33 = tmp0 < tmp32
tmp34 = tmp31 & tmp33
tmp35 = tl.load(in_ptr5 + (r2 + 128 * x1), tmp34 & xmask,
eviction_policy='evict_last', other=0.0)
tmp36 = tl.full([1, 1], 7, tl.int64)
tmp37 = tmp0 >= tmp36
tmp38 = tmp0 < tmp30
tmp39 = tmp37 & tmp38
tmp40 = tl.load(in_ptr6 + (r2 + 128 * x1), tmp39 & xmask,
eviction_policy='evict_last', other=0.0)
tmp41 = tl.full([1, 1], 6, tl.int64)
tmp42 = tmp0 >= tmp41
tmp43 = tmp0 < tmp36
tmp44 = tmp42 & tmp43
tmp45 = tl.load(in_ptr7 + (r2 + 128 * x1), tmp44 & xmask,
eviction_policy='evict_last', other=0.0)
tmp46 = tmp0 >= tmp3
tmp47 = tmp0 < tmp41
tmp48 = tmp46 & tmp47
tmp49 = tl.load(in_ptr8 + (r2 + 128 * x1), tmp48 & xmask,
eviction_policy='evict_last', other=0.0)
tmp50 = tl.where(tmp48, tmp49, tmp29)
tmp51 = tl.where(tmp44, tmp45, tmp50)
tmp52 = tl.where(tmp39, tmp40, tmp51)
tmp53 = tl.where(tmp34, tmp35, tmp52)
tmp54 = tl.full([1, 1], 12, tl.int64)
tmp55 = tmp0 >= tmp54
tmp56 = tl.full([1, 1], 13, tl.int64)
tmp57 = tmp0 < tmp56
tmp58 = tmp55 & tmp57
tmp59 = tl.load(in_ptr9 + (r2 + 128 * x1), tmp58 & xmask,
eviction_policy='evict_last', other=0.0)
tmp60 = tl.full([1, 1], 11, tl.int64)
tmp61 = tmp0 >= tmp60
tmp62 = tmp0 < tmp54
tmp63 = tmp61 & tmp62
tmp64 = tl.load(in_ptr10 + (r2 + 128 * x1), tmp63 & xmask,
eviction_policy='evict_last', other=0.0)
tmp65 = tl.full([1, 1], 10, tl.int64)
tmp66 = tmp0 >= tmp65
tmp67 = tmp0 < tmp60
tmp68 = tmp66 & tmp67
tmp69 = tl.load(in_ptr11 + (r2 + 128 * x1), tmp68 & xmask,
eviction_policy='evict_last', other=0.0)
tmp70 = tmp0 >= tmp32
tmp71 = tmp0 < tmp65
tmp72 = tmp70 & tmp71
tmp73 = tl.load(in_ptr12 + (r2 + 128 * x1), tmp72 & xmask,
eviction_policy='evict_last', other=0.0)
tmp74 = tl.where(tmp72, tmp73, tmp53)
tmp75 = tl.where(tmp68, tmp69, tmp74)
tmp76 = tl.where(tmp63, tmp64, tmp75)
tmp77 = tl.where(tmp58, tmp59, tmp76)
tmp78 = tl.full([1, 1], 16, tl.int64)
tmp79 = tmp0 >= tmp78
tmp80 = tl.full([1, 1], 17, tl.int64)
tmp81 = tmp0 < tmp80
tmp82 = tmp79 & tmp81
tmp83 = tl.load(in_ptr13 + (r2 + 128 * x1), tmp82 & xmask,
eviction_policy='evict_last', other=0.0)
tmp84 = tl.full([1, 1], 15, tl.int64)
tmp85 = tmp0 >= tmp84
tmp86 = tmp0 < tmp78
tmp87 = tmp85 & tmp86
tmp88 = tl.load(in_ptr14 + (r2 + 128 * x1), tmp87 & xmask,
eviction_policy='evict_last', other=0.0)
tmp89 = tl.full([1, 1], 14, tl.int64)
tmp90 = tmp0 >= tmp89
tmp91 = tmp0 < tmp84
tmp92 = tmp90 & tmp91
tmp93 = tl.load(in_ptr15 + (r2 + 128 * x1), tmp92 & xmask,
eviction_policy='evict_last', other=0.0)
tmp94 = tmp0 >= tmp56
tmp95 = tmp0 < tmp89
tmp96 = tmp94 & tmp95
tmp97 = tl.load(in_ptr16 + (r2 + 128 * x1), tmp96 & xmask,
eviction_policy='evict_last', other=0.0)
tmp98 = tl.where(tmp96, tmp97, tmp77)
tmp99 = tl.where(tmp92, tmp93, tmp98)
tmp100 = tl.where(tmp87, tmp88, tmp99)
tmp101 = tl.where(tmp82, tmp83, tmp100)
tmp102 = tl.full([1, 1], 20, tl.int64)
tmp103 = tmp0 >= tmp102
tmp104 = tl.full([1, 1], 21, tl.int64)
tmp105 = tmp0 < tmp104
tmp106 = tmp103 & tmp105
tmp107 = tl.load(in_ptr17 + (r2 + 128 * x1), tmp106 & xmask,
eviction_policy='evict_last', other=0.0)
tmp108 = tl.full([1, 1], 19, tl.int64)
tmp109 = tmp0 >= tmp108
tmp110 = tmp0 < tmp102
tmp111 = tmp109 & tmp110
tmp112 = tl.load(in_ptr18 + (r2 + 128 * x1), tmp111 & xmask,
eviction_policy='evict_last', other=0.0)
tmp113 = tl.full([1, 1], 18, tl.int64)
tmp114 = tmp0 >= tmp113
tmp115 = tmp0 < tmp108
tmp116 = tmp114 & tmp115
tmp117 = tl.load(in_ptr19 + (r2 + 128 * x1), tmp116 & xmask,
eviction_policy='evict_last', other=0.0)
tmp118 = tmp0 >= tmp80
tmp119 = tmp0 < tmp113
tmp120 = tmp118 & tmp119
tmp121 = tl.load(in_ptr20 + (r2 + 128 * x1), tmp120 & xmask,
eviction_policy='evict_last', other=0.0)
tmp122 = tl.where(tmp120, tmp121, tmp101)
tmp123 = tl.where(tmp116, tmp117, tmp122)
tmp124 = tl.where(tmp111, tmp112, tmp123)
tmp125 = tl.where(tmp106, tmp107, tmp124)
tmp126 = tl.full([1, 1], 24, tl.int64)
tmp127 = tmp0 >= tmp126
tmp128 = tl.full([1, 1], 25, tl.int64)
tmp129 = tmp0 < tmp128
tmp130 = tmp127 & tmp129
tmp131 = tl.load(in_ptr21 + (r2 + 128 * x1), tmp130 & xmask,
eviction_policy='evict_last', other=0.0)
tmp132 = tl.full([1, 1], 23, tl.int64)
tmp133 = tmp0 >= tmp132
tmp134 = tmp0 < tmp126
tmp135 = tmp133 & tmp134
tmp136 = tl.load(in_ptr22 + (r2 + 128 * x1), tmp135 & xmask,
eviction_policy='evict_last', other=0.0)
tmp137 = tl.full([1, 1], 22, tl.int64)
tmp138 = tmp0 >= tmp137
tmp139 = tmp0 < tmp132
tmp140 = tmp138 & tmp139
tmp141 = tl.load(in_ptr23 + (r2 + 128 * x1), tmp140 & xmask,
eviction_policy='evict_last', other=0.0)
tmp142 = tmp0 >= tmp104
tmp143 = tmp0 < tmp137
tmp144 = tmp142 & tmp143
tmp145 = tl.load(in_ptr24 + (r2 + 128 * x1), tmp144 & xmask,
eviction_policy='evict_last', other=0.0)
tmp146 = tl.where(tmp144, tmp145, tmp125)
tmp147 = tl.where(tmp140, tmp141, tmp146)
tmp148 = tl.where(tmp135, tmp136, tmp147)
tmp149 = tl.where(tmp130, tmp131, tmp148)
tmp150 = tl.full([1, 1], 28, tl.int64)
tmp151 = tmp0 >= tmp150
tmp152 = tl.full([1, 1], 29, tl.int64)
tmp153 = tmp0 < tmp152
tmp154 = tmp151 & tmp153
tmp155 = tl.load(in_ptr25 + (r2 + 128 * x1), tmp154 & xmask,
eviction_policy='evict_last', other=0.0)
tmp156 = tl.full([1, 1], 27, tl.int64)
tmp157 = tmp0 >= tmp156
tmp158 = tmp0 < tmp150
tmp159 = tmp157 & tmp158
tmp160 = tl.load(in_ptr26 + (r2 + 128 * x1), tmp159 & xmask,
eviction_policy='evict_last', other=0.0)
tmp161 = tl.full([1, 1], 26, tl.int64)
tmp162 = tmp0 >= tmp161
tmp163 = tmp0 < tmp156
tmp164 = tmp162 & tmp163
tmp165 = tl.load(in_ptr27 + (r2 + 128 * x1), tmp164 & xmask,
eviction_policy='evict_last', other=0.0)
tmp166 = tmp0 >= tmp128
tmp167 = tmp0 < tmp161
tmp168 = tmp166 & tmp167
tmp169 = tl.load(in_ptr28 + (r2 + 128 * x1), tmp168 & xmask,
eviction_policy='evict_last', other=0.0)
tmp170 = tl.where(tmp168, tmp169, tmp149)
tmp171 = tl.where(tmp164, tmp165, tmp170)
tmp172 = tl.where(tmp159, tmp160, tmp171)
tmp173 = tl.where(tmp154, tmp155, tmp172)
tmp174 = tl.full([1, 1], 32, tl.int64)
tmp175 = tmp0 >= tmp174
tmp176 = tl.full([1, 1], 33, tl.int64)
tmp177 = tmp0 < tmp176
tmp178 = tmp175 & tmp177
tmp179 = tl.load(in_ptr29 + (r2 + 128 * x1), tmp178 & xmask,
eviction_policy='evict_last', other=0.0)
tmp180 = tl.full([1, 1], 31, tl.int64)
tmp181 = tmp0 >= tmp180
tmp182 = tmp0 < tmp174
tmp183 = tmp181 & tmp182
tmp184 = tl.load(in_ptr30 + (r2 + 128 * x1), tmp183 & xmask,
eviction_policy='evict_last', other=0.0)
tmp185 = tl.full([1, 1], 30, tl.int64)
tmp186 = tmp0 >= tmp185
tmp187 = tmp0 < tmp180
tmp188 = tmp186 & tmp187
tmp189 = tl.load(in_ptr31 + (r2 + 128 * x1), tmp188 & xmask,
eviction_policy='evict_last', other=0.0)
tmp190 = tmp0 >= tmp152
tmp191 = tmp0 < tmp185
tmp192 = tmp190 & tmp191
tmp193 = tl.load(in_ptr32 + (r2 + 128 * x1), tmp192 & xmask,
eviction_policy='evict_last', other=0.0)
tmp194 = tl.where(tmp192, tmp193, tmp173)
tmp195 = tl.where(tmp188, tmp189, tmp194)
tmp196 = tl.where(tmp183, tmp184, tmp195)
tmp197 = tl.where(tmp178, tmp179, tmp196)
tmp198 = tl.full([1, 1], 36, tl.int64)
tmp199 = tmp0 >= tmp198
tmp200 = tl.full([1, 1], 37, tl.int64)
tmp201 = tmp0 < tmp200
tmp202 = tmp199 & tmp201
tmp203 = tl.load(in_ptr33 + (r2 + 128 * x1), tmp202 & xmask,
eviction_policy='evict_last', other=0.0)
tmp204 = tl.full([1, 1], 35, tl.int64)
tmp205 = tmp0 >= tmp204
tmp206 = tmp0 < tmp198
tmp207 = tmp205 & tmp206
tmp208 = tl.load(in_ptr34 + (r2 + 128 * x1), tmp207 & xmask,
eviction_policy='evict_last', other=0.0)
tmp209 = tl.full([1, 1], 34, tl.int64)
tmp210 = tmp0 >= tmp209
tmp211 = tmp0 < tmp204
tmp212 = tmp210 & tmp211
tmp213 = tl.load(in_ptr35 + (r2 + 128 * x1), tmp212 & xmask,
eviction_policy='evict_last', other=0.0)
tmp214 = tmp0 >= tmp176
tmp215 = tmp0 < tmp209
tmp216 = tmp214 & tmp215
tmp217 = tl.load(in_ptr36 + (r2 + 128 * x1), tmp216 & xmask,
eviction_policy='evict_last', other=0.0)
tmp218 = tl.where(tmp216, tmp217, tmp197)
tmp219 = tl.where(tmp212, tmp213, tmp218)
tmp220 = tl.where(tmp207, tmp208, tmp219)
tmp221 = tl.where(tmp202, tmp203, tmp220)
tmp222 = tl.full([1, 1], 40, tl.int64)
tmp223 = tmp0 >= tmp222
tmp224 = tl.full([1, 1], 41, tl.int64)
tmp225 = tmp0 < tmp224
tmp226 = tmp223 & tmp225
tmp227 = tl.load(in_ptr37 + (r2 + 128 * x1), tmp226 & xmask,
eviction_policy='evict_last', other=0.0)
tmp228 = tl.full([1, 1], 39, tl.int64)
tmp229 = tmp0 >= tmp228
tmp230 = tmp0 < tmp222
tmp231 = tmp229 & tmp230
tmp232 = tl.load(in_ptr38 + (r2 + 128 * x1), tmp231 & xmask,
eviction_policy='evict_last', other=0.0)
tmp233 = tl.full([1, 1], 38, tl.int64)
tmp234 = tmp0 >= tmp233
tmp235 = tmp0 < tmp228
tmp236 = tmp234 & tmp235
tmp237 = tl.load(in_ptr39 + (r2 + 128 * x1), tmp236 & xmask,
eviction_policy='evict_last', other=0.0)
tmp238 = tmp0 >= tmp200
tmp239 = tmp0 < tmp233
tmp240 = tmp238 & tmp239
tmp241 = tl.load(in_ptr40 + (r2 + 128 * x1), tmp240 & xmask,
eviction_policy='evict_last', other=0.0)
tmp242 = tl.where(tmp240, tmp241, tmp221)
tmp243 = tl.where(tmp236, tmp237, tmp242)
tmp244 = tl.where(tmp231, tmp232, tmp243)
tmp245 = tl.where(tmp226, tmp227, tmp244)
tmp246 = tl.full([1, 1], 44, tl.int64)
tmp247 = tmp0 >= tmp246
tmp248 = tl.full([1, 1], 45, tl.int64)
tmp249 = tmp0 < tmp248
tmp250 = tmp247 & tmp249
tmp251 = tl.load(in_ptr41 + (r2 + 128 * x1), tmp250 & xmask,
eviction_policy='evict_last', other=0.0)
tmp252 = tl.full([1, 1], 43, tl.int64)
tmp253 = tmp0 >= tmp252
tmp254 = tmp0 < tmp246
tmp255 = tmp253 & tmp254
tmp256 = tl.load(in_ptr42 + (r2 + 128 * x1), tmp255 & xmask,
eviction_policy='evict_last', other=0.0)
tmp257 = tl.full([1, 1], 42, tl.int64)
tmp258 = tmp0 >= tmp257
tmp259 = tmp0 < tmp252
tmp260 = tmp258 & tmp259
tmp261 = tl.load(in_ptr43 + (r2 + 128 * x1), tmp260 & xmask,
eviction_policy='evict_last', other=0.0)
tmp262 = tmp0 >= tmp224
tmp263 = tmp0 < tmp257
tmp264 = tmp262 & tmp263
tmp265 = tl.load(in_ptr44 + (r2 + 128 * x1), tmp264 & xmask,
eviction_policy='evict_last', other=0.0)
tmp266 = tl.where(tmp264, tmp265, tmp245)
tmp267 = tl.where(tmp260, tmp261, tmp266)
tmp268 = tl.where(tmp255, tmp256, tmp267)
tmp269 = tl.where(tmp250, tmp251, tmp268)
tmp270 = tl.full([1, 1], 48, tl.int64)
tmp271 = tmp0 >= tmp270
tmp272 = tl.full([1, 1], 49, tl.int64)
tmp273 = tmp0 < tmp272
tmp274 = tmp271 & tmp273
tmp275 = tl.load(in_ptr45 + (r2 + 128 * x1), tmp274 & xmask,
eviction_policy='evict_last', other=0.0)
tmp276 = tl.full([1, 1], 47, tl.int64)
tmp277 = tmp0 >= tmp276
tmp278 = tmp0 < tmp270
tmp279 = tmp277 & tmp278
tmp280 = tl.load(in_ptr46 + (r2 + 128 * x1), tmp279 & xmask,
eviction_policy='evict_last', other=0.0)
tmp281 = tl.full([1, 1], 46, tl.int64)
tmp282 = tmp0 >= tmp281
tmp283 = tmp0 < tmp276
tmp284 = tmp282 & tmp283
tmp285 = tl.load(in_ptr47 + (r2 + 128 * x1), tmp284 & xmask,
eviction_policy='evict_last', other=0.0)
tmp286 = tmp0 >= tmp248
tmp287 = tmp0 < tmp281
tmp288 = tmp286 & tmp287
tmp289 = tl.load(in_ptr48 + (r2 + 128 * x1), tmp288 & xmask,
eviction_policy='evict_last', other=0.0)
tmp290 = tl.where(tmp288, tmp289, tmp269)
tmp291 = tl.where(tmp284, tmp285, tmp290)
tmp292 = tl.where(tmp279, tmp280, tmp291)
tmp293 = tl.where(tmp274, tmp275, tmp292)
tmp294 = tl.full([1, 1], 52, tl.int64)
tmp295 = tmp0 >= tmp294
tmp296 = tl.full([1, 1], 53, tl.int64)
tmp297 = tmp0 < tmp296
tmp298 = tmp295 & tmp297
tmp299 = tl.load(in_ptr49 + (r2 + 128 * x1), tmp298 & xmask,
eviction_policy='evict_last', other=0.0)
tmp300 = tl.full([1, 1], 51, tl.int64)
tmp301 = tmp0 >= tmp300
tmp302 = tmp0 < tmp294
tmp303 = tmp301 & tmp302
tmp304 = tl.load(in_ptr50 + (r2 + 128 * x1), tmp303 & xmask,
eviction_policy='evict_last', other=0.0)
tmp305 = tl.full([1, 1], 50, tl.int64)
tmp306 = tmp0 >= tmp305
tmp307 = tmp0 < tmp300
tmp308 = tmp306 & tmp307
tmp309 = tl.load(in_ptr51 + (r2 + 128 * x1), tmp308 & xmask,
eviction_policy='evict_last', other=0.0)
tmp310 = tmp0 >= tmp272
tmp311 = tmp0 < tmp305
tmp312 = tmp310 & tmp311
tmp313 = tl.load(in_ptr52 + (r2 + 128 * x1), tmp312 & xmask,
eviction_policy='evict_last', other=0.0)
tmp314 = tl.where(tmp312, tmp313, tmp293)
tmp315 = tl.where(tmp308, tmp309, tmp314)
tmp316 = tl.where(tmp303, tmp304, tmp315)
tmp317 = tl.where(tmp298, tmp299, tmp316)
tmp318 = tl.full([1, 1], 56, tl.int64)
tmp319 = tmp0 >= tmp318
tmp320 = tl.full([1, 1], 57, tl.int64)
tmp321 = tmp0 < tmp320
tmp322 = tmp319 & tmp321
tmp323 = tl.load(in_ptr53 + (r2 + 128 * x1), tmp322 & xmask,
eviction_policy='evict_last', other=0.0)
tmp324 = tl.full([1, 1], 55, tl.int64)
tmp325 = tmp0 >= tmp324
tmp326 = tmp0 < tmp318
tmp327 = tmp325 & tmp326
tmp328 = tl.load(in_ptr54 + (r2 + 128 * x1), tmp327 & xmask,
eviction_policy='evict_last', other=0.0)
tmp329 = tl.full([1, 1], 54, tl.int64)
tmp330 = tmp0 >= tmp329
tmp331 = tmp0 < tmp324
tmp332 = tmp330 & tmp331
tmp333 = tl.load(in_ptr55 + (r2 + 128 * x1), tmp332 & xmask,
eviction_policy='evict_last', other=0.0)
tmp334 = tmp0 >= tmp296
tmp335 = tmp0 < tmp329
tmp336 = tmp334 & tmp335
tmp337 = tl.load(in_ptr56 + (r2 + 128 * x1), tmp336 & xmask,
eviction_policy='evict_last', other=0.0)
tmp338 = tl.where(tmp336, tmp337, tmp317)
tmp339 = tl.where(tmp332, tmp333, tmp338)
tmp340 = tl.where(tmp327, tmp328, tmp339)
tmp341 = tl.where(tmp322, tmp323, tmp340)
tmp342 = tl.full([1, 1], 60, tl.int64)
tmp343 = tmp0 >= tmp342
tmp344 = tl.full([1, 1], 61, tl.int64)
tmp345 = tmp0 < tmp344
tmp346 = tmp343 & tmp345
tmp347 = tl.load(in_ptr57 + (r2 + 128 * x1), tmp346 & xmask,
eviction_policy='evict_last', other=0.0)
tmp348 = tl.full([1, 1], 59, tl.int64)
tmp349 = tmp0 >= tmp348
tmp350 = tmp0 < tmp342
tmp351 = tmp349 & tmp350
tmp352 = tl.load(in_ptr58 + (r2 + 128 * x1), tmp351 & xmask,
eviction_policy='evict_last', other=0.0)
tmp353 = tl.full([1, 1], 58, tl.int64)
tmp354 = tmp0 >= tmp353
tmp355 = tmp0 < tmp348
tmp356 = tmp354 & tmp355
tmp357 = tl.load(in_ptr59 + (r2 + 128 * x1), tmp356 & xmask,
eviction_policy='evict_last', other=0.0)
tmp358 = tmp0 >= tmp320
tmp359 = tmp0 < tmp353
tmp360 = tmp358 & tmp359
tmp361 = tl.load(in_ptr60 + (r2 + 128 * x1), tmp360 & xmask,
eviction_policy='evict_last', other=0.0)
tmp362 = tl.where(tmp360, tmp361, tmp341)
tmp363 = tl.where(tmp356, tmp357, tmp362)
tmp364 = tl.where(tmp351, tmp352, tmp363)
tmp365 = tl.where(tmp346, tmp347, tmp364)
tmp366 = tl.full([1, 1], 63, tl.int64)
tmp367 = tmp0 >= tmp366
tmp368 = tl.load(in_ptr61 + (r2 + 128 * x1), tmp367 & xmask,
eviction_policy='evict_last', other=0.0)
tmp369 = tl.full([1, 1], 62, tl.int64)
tmp370 = tmp0 >= tmp369
tmp371 = tmp0 < tmp366
tmp372 = tmp370 & tmp371
tmp373 = tl.load(in_ptr62 + (r2 + 128 * x1), tmp372 & xmask,
eviction_policy='evict_last', other=0.0)
tmp374 = tmp0 >= tmp344
tmp375 = tmp0 < tmp369
tmp376 = tmp374 & tmp375
tmp377 = tl.load(in_ptr63 + (r2 + 128 * x1), tmp376 & xmask,
eviction_policy='evict_last', other=0.0)
tmp378 = tl.where(tmp376, tmp377, tmp365)
tmp379 = tl.where(tmp372, tmp373, tmp378)
tmp380 = tl.where(tmp367, tmp368, tmp379)
tmp381 = tmp380 * tmp380
tmp382 = tl.broadcast_to(tmp381, [XBLOCK, RBLOCK])
tmp384 = tl.where(xmask, tmp382, 0)
tmp385 = tl.sum(tmp384, 1)[:, None]
tmp386 = libdevice.sqrt(tmp385)
tl.store(in_out_ptr0 + (r2 + 128 * x3), tmp380, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp386, xmask)
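# Final normalisation stage: the first pass divides each element of the
# aggregated (4, 64, 128) descriptor by its per-cluster norm (clamped at
# 1e-12) and accumulates the squares to obtain the global L2 norm; the second
# pass re-applies the per-cluster division and divides by that global norm,
# writing the flattened (4, 8192) output.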
@triton.jit
def triton_red_fused_div_linalg_vector_norm_7(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 4
rnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
_tmp7 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 8192 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.load(in_ptr1 + (64 * x0 + r1 // 128), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp2 = 1e-12
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 / tmp3
tmp5 = tmp4 * tmp4
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = _tmp7 + tmp6
_tmp7 = tl.where(rmask & xmask, tmp8, _tmp7)
tmp7 = tl.sum(_tmp7, 1)[:, None]
tmp9 = libdevice.sqrt(tmp7)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 8192 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp11 = tl.load(in_ptr1 + (64 * x0 + r1 // 128), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp12 = 1e-12
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp14 = tmp10 / tmp13
tmp15 = triton_helpers.maximum(tmp9, tmp12)
tmp16 = tmp14 / tmp15
tl.store(out_ptr0 + (r1 + 8192 * x0), tmp16, rmask & xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 128, 64, 64), (524288, 4096, 64, 1))
assert_size_stride(primals_2, (64, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_3, (64, 128), (128, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1),
torch.float32)
get_raw_stream(0)
triton_red_fused_linalg_vector_norm_0[grid(16384)](primals_1, buf0,
16384, 128, XBLOCK=64, RBLOCK=4, num_warps=8, num_stages=1)
buf1 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1),
torch.float32)
buf6 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096,
1), torch.float32)
buf8 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096,
1), torch.float32)
buf10 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf12 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf15 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf17 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf19 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf21 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf24 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf26 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf28 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf30 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf33 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf35 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf37 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf39 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf42 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf44 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf46 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf48 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf51 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf53 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf55 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf57 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf60 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf62 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf64 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf66 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf69 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf71 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf73 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf75 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf78 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf80 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf82 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf84 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf87 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf89 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf91 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf93 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf96 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf98 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf100 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf102 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf105 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf107 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf109 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf111 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf114 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf116 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf118 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf120 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf123 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf125 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf127 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf129 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf132 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf134 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf136 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf138 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf141 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf143 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
buf145 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152,
4096, 1), torch.float32)
triton_poi_fused_div_sub_1[grid(2097152)](primals_1, buf0,
primals_3, buf1, buf6, buf8, buf10, buf12, buf15, buf17, buf19,
buf21, buf24, buf26, buf28, buf30, buf33, buf35, buf37, buf39,
buf42, buf44, buf46, buf48, buf51, buf53, buf55, buf57, buf60,
buf62, buf64, buf66, buf69, buf71, buf73, buf75, buf78, buf80,
buf82, buf84, buf87, buf89, buf91, buf93, buf96, buf98, buf100,
buf102, buf105, buf107, buf109, buf111, buf114, buf116, buf118,
buf120, buf123, buf125, buf127, buf129, buf132, buf134, buf136,
buf138, buf141, buf143, buf145, 2097152, XBLOCK=512, num_warps=
8, num_stages=1)
del primals_1
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf3 = reinterpret_tensor(buf0, (4, 1, 4096), (4096, 4096, 1), 0)
del buf0
buf4 = empty_strided_cuda((4, 1, 4096), (4096, 4096, 1), torch.float32)
triton_per_fused__softmax_2[grid(16384)](buf2, buf3, buf4, 16384,
64, XBLOCK=8, num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf7 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf9 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf11 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf13 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf16 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf18 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf20 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf22 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf25 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf27 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf29 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf31 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf34 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf36 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf38 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf40 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf43 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf45 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf47 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf49 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf52 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf54 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf56 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf58 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf61 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf63 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf65 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf67 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
triton_red_fused_mul_sub_sum_3[grid(512)](buf1, primals_3, buf2,
buf3, buf4, buf6, buf8, buf10, buf12, buf15, buf17, buf19,
buf21, buf24, buf26, buf28, buf30, buf33, buf35, buf37, buf39,
buf42, buf44, buf46, buf48, buf51, buf53, buf55, buf57, buf60,
buf62, buf64, buf66, buf5, buf7, buf9, buf11, buf13, buf16,
buf18, buf20, buf22, buf25, buf27, buf29, buf31, buf34, buf36,
buf38, buf40, buf43, buf45, buf47, buf49, buf52, buf54, buf56,
buf58, buf61, buf63, buf65, buf67, 512, 4096, XBLOCK=1, RBLOCK=
1024, num_warps=16, num_stages=1)
buf70 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf72 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf74 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf76 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf79 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf81 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf83 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf85 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf88 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf90 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf92 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf94 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf97 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf99 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf101 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf103 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf106 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf108 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf110 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf112 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf115 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf117 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf119 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf121 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf124 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf126 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf128 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf130 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
triton_red_fused_mul_sum_4[grid(512)](buf69, buf2, buf3, buf4,
buf71, buf73, buf75, buf78, buf80, buf82, buf84, buf87, buf89,
buf91, buf93, buf96, buf98, buf100, buf102, buf105, buf107,
buf109, buf111, buf114, buf116, buf118, buf120, buf123, buf125,
buf127, buf129, buf70, buf72, buf74, buf76, buf79, buf81, buf83,
buf85, buf88, buf90, buf92, buf94, buf97, buf99, buf101, buf103,
buf106, buf108, buf110, buf112, buf115, buf117, buf119, buf121,
buf124, buf126, buf128, buf130, 512, 4096, XBLOCK=1, RBLOCK=
1024, num_warps=16, num_stages=1)
buf133 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf135 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf137 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf139 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf142 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf144 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
buf146 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32)
triton_red_fused_mul_sum_5[grid(512)](buf132, buf2, buf3, buf4,
buf134, buf136, buf138, buf141, buf143, buf145, buf133, buf135,
buf137, buf139, buf142, buf144, buf146, 512, 4096, XBLOCK=1,
RBLOCK=1024, num_warps=16, num_stages=1)
buf14 = empty_strided_cuda((4, 64, 128), (8192, 128, 1), torch.float32)
buf23 = buf14
del buf14
buf32 = buf23
del buf23
buf41 = buf32
del buf32
buf50 = buf41
del buf41
buf59 = buf50
del buf50
buf68 = buf59
del buf59
buf77 = buf68
del buf68
buf86 = buf77
del buf77
buf95 = buf86
del buf86
buf104 = buf95
del buf95
buf113 = buf104
del buf104
buf122 = buf113
del buf113
buf131 = buf122
del buf122
buf140 = buf131
del buf131
buf147 = buf140
del buf140
buf148 = empty_strided_cuda((4, 64, 1), (64, 1, 256), torch.float32)
buf149 = reinterpret_tensor(buf148, (4, 64, 1), (64, 1, 1), 0)
del buf148
triton_per_fused_copy_linalg_vector_norm_zeros_6[grid(256)](buf147,
buf149, buf13, buf11, buf9, buf7, buf5, buf22, buf20, buf18,
buf16, buf31, buf29, buf27, buf25, buf40, buf38, buf36, buf34,
buf49, buf47, buf45, buf43, buf58, buf56, buf54, buf52, buf67,
buf65, buf63, buf61, buf76, buf74, buf72, buf70, buf85, buf83,
buf81, buf79, buf94, buf92, buf90, buf88, buf103, buf101, buf99,
buf97, buf112, buf110, buf108, buf106, buf121, buf119, buf117,
buf115, buf130, buf128, buf126, buf124, buf139, buf137, buf135,
buf133, buf146, buf144, buf142, 256, 128, XBLOCK=1, num_warps=2,
num_stages=1)
del buf101
del buf103
del buf106
del buf108
del buf11
del buf110
del buf112
del buf115
del buf117
del buf119
del buf121
del buf124
del buf126
del buf128
del buf13
del buf130
del buf133
del buf135
del buf137
del buf139
del buf142
del buf144
del buf146
del buf16
del buf18
del buf20
del buf22
del buf25
del buf27
del buf29
del buf31
del buf34
del buf36
del buf38
del buf40
del buf43
del buf45
del buf47
del buf49
del buf5
del buf52
del buf54
del buf56
del buf58
del buf61
del buf63
del buf65
del buf67
del buf7
del buf70
del buf72
del buf74
del buf76
del buf79
del buf81
del buf83
del buf85
del buf88
del buf9
del buf90
del buf92
del buf94
del buf97
del buf99
buf150 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf151 = reinterpret_tensor(buf150, (4, 1), (1, 1), 0)
del buf150
buf152 = empty_strided_cuda((4, 8192), (8192, 1), torch.float32)
triton_red_fused_div_linalg_vector_norm_7[grid(4)](buf151, buf147,
buf149, buf152, 4, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
return (buf152, primals_2, buf1, buf2, buf3, buf4, reinterpret_tensor(
primals_3, (1, 128), (128, 1), 0), buf6, buf8, buf10, buf12, buf15,
buf17, buf19, buf21, buf24, buf26, buf28, buf30, buf33, buf35,
buf37, buf39, buf42, buf44, buf46, buf48, buf51, buf53, buf55,
buf57, buf60, buf62, buf64, buf66, buf69, buf71, buf73, buf75,
buf78, buf80, buf82, buf84, buf87, buf89, buf91, buf93, buf96,
buf98, buf100, buf102, buf105, buf107, buf109, buf111, buf114,
buf116, buf118, buf120, buf123, buf125, buf127, buf129, buf132,
buf134, buf136, buf138, buf141, buf143, buf145, buf147, buf149, buf151)
class NetVLADNew(nn.Module):
"""NetVLAD layer implementation"""
def __init__(self, num_clusters=64, dim=128, normalize_input=True,
vladv2=False):
"""
Args:
num_clusters : int
The number of clusters
dim : int
Dimension of descriptors
alpha : float
Parameter of initialization. Larger value is harder assignment.
normalize_input : bool
If true, descriptor-wise L2 normalization is applied to input.
vladv2 : bool
If true, use vladv2 otherwise use vladv1
"""
super(NetVLADNew, self).__init__()
self.num_clusters = num_clusters
self.dim = dim
self.alpha = 0
self.vladv2 = vladv2
self.normalize_input = normalize_input
self.conv = nn.Conv2d(dim, num_clusters, kernel_size=(1, 1), bias=
vladv2)
self.centroids = nn.Parameter(torch.rand(num_clusters, dim))
def init_params(self, clsts, traindescs):
if self.vladv2 is False:
clstsAssign = clsts / np.linalg.norm(clsts, axis=1, keepdims=True)
dots = np.dot(clstsAssign, traindescs.T)
dots.sort(0)
dots = dots[::-1, :]
self.alpha = (-np.log(0.01) / np.mean(dots[0, :] - dots[1, :])
).item()
self.centroids = nn.Parameter(torch.from_numpy(clsts))
self.conv.weight = nn.Parameter(torch.from_numpy(self.alpha *
clstsAssign).unsqueeze(2).unsqueeze(3))
self.conv.bias = None
else:
knn = NearestNeighbors(n_jobs=-1)
knn.fit(traindescs)
del traindescs
dsSq = np.square(knn.kneighbors(clsts, 2)[1])
del knn
self.alpha = (-np.log(0.01) / np.mean(dsSq[:, 1] - dsSq[:, 0])
).item()
self.centroids = nn.Parameter(torch.from_numpy(clsts))
del clsts, dsSq
self.conv.weight = nn.Parameter((2.0 * self.alpha * self.
centroids).unsqueeze(-1).unsqueeze(-1))
self.conv.bias = nn.Parameter(-self.alpha * self.centroids.norm
(dim=1))
def forward(self, input_0):
primals_3 = self.centroids
primals_2 = self.conv.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
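# A minimal usage sketch (illustrative, not part of the original repo): shapes
# follow the asserts in call(), and a CUDA device is assumed because the fused
# kernels above are CUDA-only.
if __name__ == '__main__':
    model = NetVLADNew(num_clusters=64, dim=128).cuda()
    feats = torch.rand(4, 128, 64, 64, device='cuda')
    vlad = model(feats)
    print(vlad.shape)  # expected: torch.Size([4, 8192]), i.e. num_clusters * dim per sample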
|
AlessandroRigoli/project_vg
|
NetVLAD
| false
| 11,565
|
[
"MIT"
] | 0
|
cb1323bee60cdb4108fe0aab68791321c7974832
|
https://github.com/AlessandroRigoli/project_vg/tree/cb1323bee60cdb4108fe0aab68791321c7974832
|
Block
|
import torch
from torch import nn
def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument.
"""
if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1)
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.
device)
random_tensor.floor_()
output = x.div(keep_prob) * random_tensor
return output
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class Mlp(nn.Module):
""" MLP as used in Vision Transformer, MLP-Mixer and related networks
"""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0.0,
proj_drop=0.0):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads
).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = q @ k.transpose(-2, -1) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, drop=
0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn
.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias,
attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path
) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
def forward(self, x):
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4, 'num_heads': 4}]
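# A minimal usage sketch (illustrative): arguments and input shape mirror
# get_init_inputs() and get_inputs() above; this runs the pure PyTorch Block on CPU.
if __name__ == '__main__':
    block = Block(dim=4, num_heads=4)
    out = block(torch.rand(4, 4, 4))
    print(out.shape)  # expected: torch.Size([4, 4, 4])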
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
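# Kernel 0 computes the per-row mean and rsqrt(variance + 1e-05) for LayerNorm;
# kernel 1 applies them together with the affine weight and bias.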
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (4 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
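# The attention softmax is split across two kernels: _softmax_4 subtracts the
# row maximum and exponentiates (for numerical stability), _softmax_5 divides
# by the row sum.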
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (8 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + 1)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr2 + 2)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr2 + 3)
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp10 = tmp7 + tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp17 = tmp14 + tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp24 = tmp21 + tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr1 + x0, tmp40, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 1e-05
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp6 * tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_gelu_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
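    # exact GELU: 0.5 * x * (1 + erf(x / sqrt(2))); 0.7071067811865476 == 1/sqrt(2)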
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr0 + x2, xmask)
tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp4 + tmp7
tl.store(in_out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (16, 4), (4, 1))
assert_size_stride(primals_10, (16,), (1,))
assert_size_stride(primals_11, (4, 16), (16, 1))
assert_size_stride(primals_12, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0,
buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del primals_1
del primals_2
buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_2[grid(16, 4)](buf3, buf4, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
triton_poi_fused_clone_3[grid(16, 4)](buf3, buf5, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(256)](buf6, buf7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
triton_poi_fused__softmax_5[grid(256)](buf7, buf8, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_6[grid(16, 4)](buf3, buf9, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
del buf3
buf10 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_7[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0)
del buf10
extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf12)
buf13 = buf1
del buf1
buf14 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_8[grid(16)](primals_3, buf12,
primals_6, buf13, buf14, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_9[grid(64)](primals_3, buf12,
primals_6, buf13, buf14, primals_7, primals_8, buf15, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf13
del buf14
del primals_8
buf16 = reinterpret_tensor(buf7, (16, 16), (16, 1), 0)
del buf7
extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_9, (4, 16), (1, 4), 0),
alpha=1, beta=1, out=buf16)
del primals_10
buf17 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_poi_fused_gelu_10[grid(256)](buf16, buf17, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf17, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_11, (16, 4), (1, 16), 0), out=buf18)
buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0)
del buf18
triton_poi_fused_add_11[grid(64)](buf19, primals_3, buf12,
primals_6, primals_12, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_12
return buf19, primals_3, primals_6, primals_7, reinterpret_tensor(buf2,
(16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0
), buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0
), buf16, reinterpret_tensor(buf17, (16, 16), (16, 1), 0
), primals_11, primals_9, primals_5, reinterpret_tensor(buf9, (16,
1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), primals_4
def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument.
"""
if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1)
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.
device)
random_tensor.floor_()
output = x.div(keep_prob) * random_tensor
return output
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class Mlp(nn.Module):
""" MLP as used in Vision Transformer, MLP-Mixer and related networks
"""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0.0,
proj_drop=0.0):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads
).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = q @ k.transpose(-2, -1) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class BlockNew(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, drop=
0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn
.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias,
attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path
) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
def forward(self, input_0):
primals_1 = self.norm1.weight
primals_2 = self.norm1.bias
primals_4 = self.attn.qkv.weight
primals_5 = self.attn.proj.weight
primals_6 = self.attn.proj.bias
primals_7 = self.norm2.weight
primals_8 = self.norm2.bias
primals_9 = self.mlp.fc1.weight
primals_10 = self.mlp.fc1.bias
primals_11 = self.mlp.fc2.weight
primals_12 = self.mlp.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
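# A minimal usage sketch (illustrative, assuming a CUDA device): BlockNew keeps
# the Block constructor but routes forward through the fused Triton kernels in call().
if __name__ == '__main__':
    block = BlockNew(dim=4, num_heads=4).cuda()
    out = block(torch.rand(4, 4, 4, device='cuda'))
    print(out.shape)  # expected: torch.Size([4, 4, 4])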
|
JetRunner/PaSST-EE
|
Block
| false
| 11,566
|
[
"Apache-2.0"
] | 0
|
2ff8f4fd0e9c1868856d08147e6e3cf1c1bed68c
|
https://github.com/JetRunner/PaSST-EE/tree/2ff8f4fd0e9c1868856d08147e6e3cf1c1bed68c
|
BinarySigmoid
|
import abc
import inspect
import torch
import warnings
from pathlib import Path  # used by get_module_name below
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import Any
from typing import *
def get_module_name(cls_or_func):
module_name = cls_or_func.__module__
if module_name == '__main__':
for frm in inspect.stack():
if inspect.getmodule(frm[0]).__name__ == '__main__':
main_file_path = Path(inspect.getsourcefile(frm[0]))
if not Path().samefile(main_file_path.parent):
raise RuntimeError(
f'You are using "{main_file_path}" to launch your experiment, please launch the experiment under the directory where "{main_file_path.name}" is located.'
)
module_name = main_file_path.stem
break
if module_name == '__main__':
warnings.warn(
'Callstack exhausted but main module still not found. This will probably cause issues that the function/class cannot be imported.'
)
if (f'{cls_or_func.__module__}.{cls_or_func.__name__}' ==
'torch.nn.modules.rnn.LSTM'):
module_name = cls_or_func.__module__
return module_name
_last_uid = {'default': 0}  # module-level uid registry; referenced below but omitted from the extracted snippet
def reset_uid(namespace: 'str'='default') ->None:
    _last_uid[namespace] = 0
def _create_wrapper_cls(cls, store_init_parameters=True, reset_mutation_uid
=False, stop_parsing=True):
class wrapper(cls):
def __init__(self, *args, **kwargs):
self._stop_parsing = stop_parsing
if reset_mutation_uid:
reset_uid('mutation')
if store_init_parameters:
argname_list = list(inspect.signature(cls.__init__).
parameters.keys())[1:]
full_args = {}
full_args.update(kwargs)
assert len(args) <= len(argname_list
), f'Length of {args} is greater than length of {argname_list}.'
for argname, value in zip(argname_list, args):
full_args[argname] = value
args = list(args)
for i, value in enumerate(args):
if isinstance(value, Translatable):
args[i] = value._translate()
for i, value in kwargs.items():
if isinstance(value, Translatable):
kwargs[i] = value._translate()
self._init_parameters = full_args
else:
self._init_parameters = {}
super().__init__(*args, **kwargs)
wrapper.__module__ = get_module_name(cls)
wrapper.__name__ = cls.__name__
wrapper.__qualname__ = cls.__qualname__
wrapper.__init__.__doc__ = cls.__init__.__doc__
return wrapper
def serialize_cls(cls):
"""
    To create a serializable class.
"""
return _create_wrapper_cls(cls)
def basic_unit(cls):
"""
To wrap a module as a basic unit, to stop it from parsing and make it mutate-able.
"""
import torch.nn as nn
assert issubclass(cls, nn.Module
), 'When using @basic_unit, the class must be a subclass of nn.Module.'
return serialize_cls(cls)
class Translatable(abc.ABC):
"""
Inherit this class and implement ``translate`` when the inner class needs a different
parameter from the wrapper class in its init function.
"""
@abc.abstractmethod
def _translate(self) ->Any:
pass
@basic_unit
class BinarySigmoid(nn.Module):
def forward(self, x):
return torch.sigmoid(x[0]) * x[1]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
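# A minimal usage sketch (illustrative): with the (4, 4, 4, 4) input from
# get_inputs(), x[0] and x[1] are the first two (4, 4, 4) slices. Run from the
# snippet's own directory, since get_module_name() checks the launch directory.
if __name__ == '__main__':
    gate = BinarySigmoid()
    y = gate(torch.rand(4, 4, 4, 4))
    print(y.shape)  # expected: torch.Size([4, 4, 4])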
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import abc
import inspect
import warnings
from pathlib import Path  # used by get_module_name below
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import Any
from typing import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
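    # x[1] starts 64 elements after x[0] in the contiguous (4, 4, 4, 4) input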
tmp2 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0[grid(64)](arg0_1, buf0, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del arg0_1
return buf0,
def get_module_name(cls_or_func):
module_name = cls_or_func.__module__
if module_name == '__main__':
for frm in inspect.stack():
if inspect.getmodule(frm[0]).__name__ == '__main__':
main_file_path = Path(inspect.getsourcefile(frm[0]))
if not Path().samefile(main_file_path.parent):
raise RuntimeError(
f'You are using "{main_file_path}" to launch your experiment, please launch the experiment under the directory where "{main_file_path.name}" is located.'
)
module_name = main_file_path.stem
break
if module_name == '__main__':
warnings.warn(
'Callstack exhausted but main module still not found. This will probably cause issues that the function/class cannot be imported.'
)
if (f'{cls_or_func.__module__}.{cls_or_func.__name__}' ==
'torch.nn.modules.rnn.LSTM'):
module_name = cls_or_func.__module__
return module_name
_last_uid = {'default': 0}  # module-level uid registry; referenced below but omitted from the extracted snippet
def reset_uid(namespace: 'str'='default') ->None:
    _last_uid[namespace] = 0
def _create_wrapper_cls(cls, store_init_parameters=True, reset_mutation_uid
=False, stop_parsing=True):
class wrapper(cls):
def __init__(self, *args, **kwargs):
self._stop_parsing = stop_parsing
if reset_mutation_uid:
reset_uid('mutation')
if store_init_parameters:
argname_list = list(inspect.signature(cls.__init__).
parameters.keys())[1:]
full_args = {}
full_args.update(kwargs)
assert len(args) <= len(argname_list
), f'Length of {args} is greater than length of {argname_list}.'
for argname, value in zip(argname_list, args):
full_args[argname] = value
args = list(args)
for i, value in enumerate(args):
if isinstance(value, Translatable):
args[i] = value._translate()
for i, value in kwargs.items():
if isinstance(value, Translatable):
kwargs[i] = value._translate()
self._init_parameters = full_args
else:
self._init_parameters = {}
super().__init__(*args, **kwargs)
wrapper.__module__ = get_module_name(cls)
wrapper.__name__ = cls.__name__
wrapper.__qualname__ = cls.__qualname__
wrapper.__init__.__doc__ = cls.__init__.__doc__
return wrapper
def serialize_cls(cls):
"""
    To create a serializable class.
"""
return _create_wrapper_cls(cls)
def basic_unit(cls):
"""
To wrap a module as a basic unit, to stop it from parsing and make it mutate-able.
"""
import torch.nn as nn
assert issubclass(cls, nn.Module
), 'When using @basic_unit, the class must be a subclass of nn.Module.'
return serialize_cls(cls)
class Translatable(abc.ABC):
"""
Inherit this class and implement ``translate`` when the inner class needs a different
parameter from the wrapper class in its init function.
"""
@abc.abstractmethod
def _translate(self) ->Any:
pass
@basic_unit
class BinarySigmoidNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Johnsonms/NNI_master
|
BinarySigmoid
| false
| 11,567
|
[
"MIT"
] | 0
|
e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
https://github.com/Johnsonms/NNI_master/tree/e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
MLB
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class MLB(nn.Module):
def __init__(self, input_dims, output_dim, mm_dim=1200, activ_input=
'relu', activ_output='relu', normalize=False, dropout_input=0.0,
dropout_pre_lin=0.0, dropout_output=0.0):
super(MLB, self).__init__()
self.input_dims = input_dims
self.mm_dim = mm_dim
self.output_dim = output_dim
self.activ_input = activ_input
self.activ_output = activ_output
self.normalize = normalize
self.dropout_input = dropout_input
self.dropout_pre_lin = dropout_pre_lin
self.dropout_output = dropout_output
self.linear0 = nn.Linear(input_dims[0], mm_dim)
self.linear1 = nn.Linear(input_dims[1], mm_dim)
self.linear_out = nn.Linear(mm_dim, output_dim)
self.n_params = sum(p.numel() for p in self.parameters() if p.
requires_grad)
def forward(self, x):
x0 = self.linear0(x[0])
x1 = self.linear1(x[1])
if self.activ_input:
x0 = getattr(F, self.activ_input)(x0)
x1 = getattr(F, self.activ_input)(x1)
if self.dropout_input > 0:
x0 = F.dropout(x0, p=self.dropout_input, training=self.training)
x1 = F.dropout(x1, p=self.dropout_input, training=self.training)
z = x0 * x1
if self.normalize:
z = torch.sqrt(F.relu(z)) - torch.sqrt(F.relu(-z))
z = F.normalize(z, p=2)
if self.dropout_pre_lin > 0:
z = F.dropout(z, p=self.dropout_pre_lin, training=self.training)
z = self.linear_out(z)
if self.activ_output:
z = getattr(F, self.activ_output)(z)
if self.dropout_output > 0:
z = F.dropout(z, p=self.dropout_output, training=self.training)
return z
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dims': [4, 4], 'output_dim': 4}]
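# Hedged usage sketch (added for illustration; not part of the original record).
# With input_dims=[4, 4], both Linear branches expect 4-dim features; indexing the
# (4, 4, 4, 4) tensor from get_inputs() as x[0] and x[1] yields two (4, 4, 4)
# modality inputs, fused through the mm_dim=1200 bottleneck into a (4, 4, 4) output.
if __name__ == '__main__':
    fusion = MLB(input_dims=[4, 4], output_dim=4)
    x = torch.rand(4, 4, 4, 4)
    z = fusion(x)
    assert z.shape == (4, 4, 4)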
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 1200
x1 = xindex // 1200
tmp0 = tl.load(in_ptr0 + (x0 + 1216 * x1), xmask)
tmp3 = tl.load(in_ptr1 + (x0 + 1216 * x1), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp5 = tmp2 * tmp4
tl.store(out_ptr0 + (x0 + 1216 * x1), tmp5, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1200, 4), (4, 1))
assert_size_stride(primals_3, (1200,), (1,))
assert_size_stride(primals_4, (1200, 4), (4, 1))
assert_size_stride(primals_5, (1200,), (1,))
assert_size_stride(primals_6, (4, 1200), (1200, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 1200), (1216, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (16,
4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 1200), (1, 4),
0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((16, 1200), (1216, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (16,
4), (4, 1), 64), reinterpret_tensor(primals_4, (4, 1200), (1, 4
), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 1200), (4864, 1216, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_relu_0[grid(19200)](buf0, buf1, buf2, 19200,
XBLOCK=256, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 1200), (1216, 1), 0
), reinterpret_tensor(primals_6, (1200, 4), (1, 1200), 0), out=buf3
)
buf4 = reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0)
del buf3
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(64)](buf4,
primals_7, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_7
return buf4, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), buf0, reinterpret_tensor(primals_1, (16, 4), (4, 1), 64
), buf1, reinterpret_tensor(buf2, (16, 1200), (1216, 1), 0
), buf5, primals_6
class MLBNew(nn.Module):
def __init__(self, input_dims, output_dim, mm_dim=1200, activ_input=
'relu', activ_output='relu', normalize=False, dropout_input=0.0,
dropout_pre_lin=0.0, dropout_output=0.0):
super(MLBNew, self).__init__()
self.input_dims = input_dims
self.mm_dim = mm_dim
self.output_dim = output_dim
self.activ_input = activ_input
self.activ_output = activ_output
self.normalize = normalize
self.dropout_input = dropout_input
self.dropout_pre_lin = dropout_pre_lin
self.dropout_output = dropout_output
self.linear0 = nn.Linear(input_dims[0], mm_dim)
self.linear1 = nn.Linear(input_dims[1], mm_dim)
self.linear_out = nn.Linear(mm_dim, output_dim)
self.n_params = sum(p.numel() for p in self.parameters() if p.
requires_grad)
def forward(self, input_0):
primals_2 = self.linear0.weight
primals_3 = self.linear0.bias
primals_4 = self.linear1.weight
primals_5 = self.linear1.bias
primals_6 = self.linear_out.weight
primals_7 = self.linear_out.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
JoannaLXY/block.bootstrap.pytorch
|
MLB
| false
| 11,568
|
[
"BSD-3-Clause"
] | 0
|
42c3e7616b704e05c6ff2376ff68b5b18044fe77
|
https://github.com/JoannaLXY/block.bootstrap.pytorch/tree/42c3e7616b704e05c6ff2376ff68b5b18044fe77
|
PatchEmbed
|
import torch
from itertools import chain as chain
import torch.utils.data
import torch.nn as nn
class PatchEmbed(nn.Module):
"""
PatchEmbed.
"""
def __init__(self, dim_in=3, dim_out=768, kernel=(1, 16, 16), stride=(1,
4, 4), padding=(1, 7, 7), conv_2d=False):
super().__init__()
if conv_2d:
conv = nn.Conv2d
else:
conv = nn.Conv3d
self.proj = conv(dim_in, dim_out, kernel_size=kernel, stride=stride,
padding=padding)
def forward(self, x):
x = self.proj(x)
return x.flatten(2).transpose(1, 2)
def get_inputs():
return [torch.rand([4, 3, 64, 64, 64])]
def get_init_inputs():
return [[], {}]
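# Hedged shape check (added for illustration; not part of the original record).
# With the default Conv3d (kernel (1, 16, 16), stride (1, 4, 4), padding (1, 7, 7)),
# a (4, 3, 64, 64, 64) clip maps to (4, 768, 66, 16, 16); flatten(2) + transpose(1, 2)
# then give 66 * 16 * 16 = 16896 patch tokens of width 768 per clip.
if __name__ == '__main__':
    embed = PatchEmbed()
    clip = torch.rand(4, 3, 64, 64, 64)
    tokens = embed(clip)
    assert tokens.shape == (4, 16896, 768)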
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from itertools import chain as chain
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16896 % 768
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (768, 3, 1, 16, 16), (768, 256, 256, 16, 1))
assert_size_stride(primals_2, (768,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64, 64), (786432, 262144, 4096,
64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
4, 4), padding=(1, 7, 7), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 768, 66, 16, 16), (12976128, 16896,
256, 16, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(51904512)](buf1, primals_2,
51904512, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
return reinterpret_tensor(buf1, (4, 16896, 768), (12976128, 1, 16896), 0
), primals_1, primals_3
class PatchEmbedNew(nn.Module):
"""
PatchEmbed.
"""
def __init__(self, dim_in=3, dim_out=768, kernel=(1, 16, 16), stride=(1,
4, 4), padding=(1, 7, 7), conv_2d=False):
super().__init__()
if conv_2d:
conv = nn.Conv2d
else:
conv = nn.Conv3d
self.proj = conv(dim_in, dim_out, kernel_size=kernel, stride=stride,
padding=padding)
def forward(self, input_0):
primals_1 = self.proj.weight
primals_2 = self.proj.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
JerryYLi/SlowFast
|
PatchEmbed
| false
| 11,569
|
[
"Apache-2.0"
] | 0
|
70bbd8d917c49f86b41fdd7c2de5c1231e6d950c
|
https://github.com/JerryYLi/SlowFast/tree/70bbd8d917c49f86b41fdd7c2de5c1231e6d950c
|
BinaryDivide
|
import abc
import inspect
import torch
import warnings
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import Any
from typing import *
from pathlib import Path  # needed by get_module_name below
def get_module_name(cls_or_func):
module_name = cls_or_func.__module__
if module_name == '__main__':
for frm in inspect.stack():
if inspect.getmodule(frm[0]).__name__ == '__main__':
main_file_path = Path(inspect.getsourcefile(frm[0]))
if not Path().samefile(main_file_path.parent):
raise RuntimeError(
f'You are using "{main_file_path}" to launch your experiment, please launch the experiment under the directory where "{main_file_path.name}" is located.'
)
module_name = main_file_path.stem
break
if module_name == '__main__':
warnings.warn(
'Callstack exhausted but main module still not found. This will probably cause issues that the function/class cannot be imported.'
)
if (f'{cls_or_func.__module__}.{cls_or_func.__name__}' ==
'torch.nn.modules.rnn.LSTM'):
module_name = cls_or_func.__module__
return module_name
def reset_uid(namespace: 'str'='default') ->None:
_last_uid[namespace] = 0
def _create_wrapper_cls(cls, store_init_parameters=True, reset_mutation_uid
=False, stop_parsing=True):
class wrapper(cls):
def __init__(self, *args, **kwargs):
self._stop_parsing = stop_parsing
if reset_mutation_uid:
reset_uid('mutation')
if store_init_parameters:
argname_list = list(inspect.signature(cls.__init__).
parameters.keys())[1:]
full_args = {}
full_args.update(kwargs)
assert len(args) <= len(argname_list
), f'Length of {args} is greater than length of {argname_list}.'
for argname, value in zip(argname_list, args):
full_args[argname] = value
args = list(args)
for i, value in enumerate(args):
if isinstance(value, Translatable):
args[i] = value._translate()
for i, value in kwargs.items():
if isinstance(value, Translatable):
kwargs[i] = value._translate()
self._init_parameters = full_args
else:
self._init_parameters = {}
super().__init__(*args, **kwargs)
wrapper.__module__ = get_module_name(cls)
wrapper.__name__ = cls.__name__
wrapper.__qualname__ = cls.__qualname__
wrapper.__init__.__doc__ = cls.__init__.__doc__
return wrapper
def serialize_cls(cls):
"""
    To create a serializable class.
"""
return _create_wrapper_cls(cls)
def basic_unit(cls):
"""
To wrap a module as a basic unit, to stop it from parsing and make it mutate-able.
"""
import torch.nn as nn
assert issubclass(cls, nn.Module
), 'When using @basic_unit, the class must be a subclass of nn.Module.'
return serialize_cls(cls)
class Translatable(abc.ABC):
"""
Inherit this class and implement ``translate`` when the inner class needs a different
parameter from the wrapper class in its init function.
"""
@abc.abstractmethod
def _translate(self) ->Any:
pass
@basic_unit
class BinaryDivide(nn.Module):
def forward(self, x):
return x[0] / (x[1] + 1e-07)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
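# Hedged usage sketch (added for illustration; not part of the original record).
# The 1e-07 term keeps the elementwise division finite when x[1] contains zeros.
# Note that @basic_unit calls get_module_name at class-creation time, so running
# this file as a script assumes it is launched from its own directory.
if __name__ == '__main__':
    div = BinaryDivide()
    x = torch.rand(4, 4, 4, 4)
    out = div(x)
    assert out.shape == (4, 4, 4)
    assert torch.allclose(out, x[0] / (x[1] + 1e-07))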
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import abc
import inspect
import warnings
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import Any
from typing import *
from pathlib import Path  # needed by get_module_name below
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp2 = 1e-07
tmp3 = tmp1 + tmp2
tmp4 = tmp0 / tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
def get_module_name(cls_or_func):
module_name = cls_or_func.__module__
if module_name == '__main__':
for frm in inspect.stack():
if inspect.getmodule(frm[0]).__name__ == '__main__':
main_file_path = Path(inspect.getsourcefile(frm[0]))
if not Path().samefile(main_file_path.parent):
raise RuntimeError(
f'You are using "{main_file_path}" to launch your experiment, please launch the experiment under the directory where "{main_file_path.name}" is located.'
)
module_name = main_file_path.stem
break
if module_name == '__main__':
warnings.warn(
'Callstack exhausted but main module still not found. This will probably cause issues that the function/class cannot be imported.'
)
if (f'{cls_or_func.__module__}.{cls_or_func.__name__}' ==
'torch.nn.modules.rnn.LSTM'):
module_name = cls_or_func.__module__
return module_name
def reset_uid(namespace: 'str'='default') ->None:
_last_uid[namespace] = 0
def _create_wrapper_cls(cls, store_init_parameters=True, reset_mutation_uid
=False, stop_parsing=True):
class wrapper(cls):
def __init__(self, *args, **kwargs):
self._stop_parsing = stop_parsing
if reset_mutation_uid:
reset_uid('mutation')
if store_init_parameters:
argname_list = list(inspect.signature(cls.__init__).
parameters.keys())[1:]
full_args = {}
full_args.update(kwargs)
assert len(args) <= len(argname_list
), f'Length of {args} is greater than length of {argname_list}.'
for argname, value in zip(argname_list, args):
full_args[argname] = value
args = list(args)
for i, value in enumerate(args):
if isinstance(value, Translatable):
args[i] = value._translate()
for i, value in kwargs.items():
if isinstance(value, Translatable):
kwargs[i] = value._translate()
self._init_parameters = full_args
else:
self._init_parameters = {}
super().__init__(*args, **kwargs)
wrapper.__module__ = get_module_name(cls)
wrapper.__name__ = cls.__name__
wrapper.__qualname__ = cls.__qualname__
wrapper.__init__.__doc__ = cls.__init__.__doc__
return wrapper
def serialize_cls(cls):
"""
    To create a serializable class.
"""
return _create_wrapper_cls(cls)
def basic_unit(cls):
"""
To wrap a module as a basic unit, to stop it from parsing and make it mutate-able.
"""
import torch.nn as nn
assert issubclass(cls, nn.Module
), 'When using @basic_unit, the class must be a subclass of nn.Module.'
return serialize_cls(cls)
class Translatable(abc.ABC):
"""
Inherit this class and implement ``translate`` when the inner class needs a different
parameter from the wrapper class in its init function.
"""
@abc.abstractmethod
def _translate(self) ->Any:
pass
@basic_unit
class BinaryDivideNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Johnsonms/NNI_master
|
BinaryDivide
| false
| 11,570
|
[
"MIT"
] | 0
|
e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
https://github.com/Johnsonms/NNI_master/tree/e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
BinaryMinus
|
import abc
import inspect
import torch
import warnings
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import Any
from typing import *
from pathlib import Path  # needed by get_module_name below
def get_module_name(cls_or_func):
module_name = cls_or_func.__module__
if module_name == '__main__':
for frm in inspect.stack():
if inspect.getmodule(frm[0]).__name__ == '__main__':
main_file_path = Path(inspect.getsourcefile(frm[0]))
if not Path().samefile(main_file_path.parent):
raise RuntimeError(
f'You are using "{main_file_path}" to launch your experiment, please launch the experiment under the directory where "{main_file_path.name}" is located.'
)
module_name = main_file_path.stem
break
if module_name == '__main__':
warnings.warn(
'Callstack exhausted but main module still not found. This will probably cause issues that the function/class cannot be imported.'
)
if (f'{cls_or_func.__module__}.{cls_or_func.__name__}' ==
'torch.nn.modules.rnn.LSTM'):
module_name = cls_or_func.__module__
return module_name
def reset_uid(namespace: 'str'='default') ->None:
_last_uid[namespace] = 0
def _create_wrapper_cls(cls, store_init_parameters=True, reset_mutation_uid
=False, stop_parsing=True):
class wrapper(cls):
def __init__(self, *args, **kwargs):
self._stop_parsing = stop_parsing
if reset_mutation_uid:
reset_uid('mutation')
if store_init_parameters:
argname_list = list(inspect.signature(cls.__init__).
parameters.keys())[1:]
full_args = {}
full_args.update(kwargs)
assert len(args) <= len(argname_list
), f'Length of {args} is greater than length of {argname_list}.'
for argname, value in zip(argname_list, args):
full_args[argname] = value
args = list(args)
for i, value in enumerate(args):
if isinstance(value, Translatable):
args[i] = value._translate()
for i, value in kwargs.items():
if isinstance(value, Translatable):
kwargs[i] = value._translate()
self._init_parameters = full_args
else:
self._init_parameters = {}
super().__init__(*args, **kwargs)
wrapper.__module__ = get_module_name(cls)
wrapper.__name__ = cls.__name__
wrapper.__qualname__ = cls.__qualname__
wrapper.__init__.__doc__ = cls.__init__.__doc__
return wrapper
def serialize_cls(cls):
"""
To create an serializable class.
"""
return _create_wrapper_cls(cls)
def basic_unit(cls):
"""
To wrap a module as a basic unit, to stop it from parsing and make it mutate-able.
"""
import torch.nn as nn
assert issubclass(cls, nn.Module
), 'When using @basic_unit, the class must be a subclass of nn.Module.'
return serialize_cls(cls)
class Translatable(abc.ABC):
"""
Inherit this class and implement ``translate`` when the inner class needs a different
parameter from the wrapper class in its init function.
"""
@abc.abstractmethod
def _translate(self) ->Any:
pass
@basic_unit
class BinaryMinus(nn.Module):
def forward(self, x):
return x[0] - x[1]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import abc
import inspect
import warnings
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import Any
from typing import *
from pathlib import Path  # needed by get_module_name below
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp2 = tmp0 - tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_sub_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
def get_module_name(cls_or_func):
module_name = cls_or_func.__module__
if module_name == '__main__':
for frm in inspect.stack():
if inspect.getmodule(frm[0]).__name__ == '__main__':
main_file_path = Path(inspect.getsourcefile(frm[0]))
if not Path().samefile(main_file_path.parent):
raise RuntimeError(
f'You are using "{main_file_path}" to launch your experiment, please launch the experiment under the directory where "{main_file_path.name}" is located.'
)
module_name = main_file_path.stem
break
if module_name == '__main__':
warnings.warn(
'Callstack exhausted but main module still not found. This will probably cause issues that the function/class cannot be imported.'
)
if (f'{cls_or_func.__module__}.{cls_or_func.__name__}' ==
'torch.nn.modules.rnn.LSTM'):
module_name = cls_or_func.__module__
return module_name
def reset_uid(namespace: 'str'='default') ->None:
_last_uid[namespace] = 0
def _create_wrapper_cls(cls, store_init_parameters=True, reset_mutation_uid
=False, stop_parsing=True):
class wrapper(cls):
def __init__(self, *args, **kwargs):
self._stop_parsing = stop_parsing
if reset_mutation_uid:
reset_uid('mutation')
if store_init_parameters:
argname_list = list(inspect.signature(cls.__init__).
parameters.keys())[1:]
full_args = {}
full_args.update(kwargs)
assert len(args) <= len(argname_list
), f'Length of {args} is greater than length of {argname_list}.'
for argname, value in zip(argname_list, args):
full_args[argname] = value
args = list(args)
for i, value in enumerate(args):
if isinstance(value, Translatable):
args[i] = value._translate()
for i, value in kwargs.items():
if isinstance(value, Translatable):
kwargs[i] = value._translate()
self._init_parameters = full_args
else:
self._init_parameters = {}
super().__init__(*args, **kwargs)
wrapper.__module__ = get_module_name(cls)
wrapper.__name__ = cls.__name__
wrapper.__qualname__ = cls.__qualname__
wrapper.__init__.__doc__ = cls.__init__.__doc__
return wrapper
def serialize_cls(cls):
"""
    To create a serializable class.
"""
return _create_wrapper_cls(cls)
def basic_unit(cls):
"""
To wrap a module as a basic unit, to stop it from parsing and make it mutate-able.
"""
import torch.nn as nn
assert issubclass(cls, nn.Module
), 'When using @basic_unit, the class must be a subclass of nn.Module.'
return serialize_cls(cls)
class Translatable(abc.ABC):
"""
Inherit this class and implement ``translate`` when the inner class needs a different
parameter from the wrapper class in its init function.
"""
@abc.abstractmethod
def _translate(self) ->Any:
pass
@basic_unit
class BinaryMinusNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Johnsonms/NNI_master
|
BinaryMinus
| false
| 11,571
|
[
"MIT"
] | 0
|
e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
https://github.com/Johnsonms/NNI_master/tree/e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
BinaryMin
|
import abc
import inspect
import torch
import warnings
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import Any
from typing import *
from pathlib import Path  # needed by get_module_name below
def get_module_name(cls_or_func):
module_name = cls_or_func.__module__
if module_name == '__main__':
for frm in inspect.stack():
if inspect.getmodule(frm[0]).__name__ == '__main__':
main_file_path = Path(inspect.getsourcefile(frm[0]))
if not Path().samefile(main_file_path.parent):
raise RuntimeError(
f'You are using "{main_file_path}" to launch your experiment, please launch the experiment under the directory where "{main_file_path.name}" is located.'
)
module_name = main_file_path.stem
break
if module_name == '__main__':
warnings.warn(
'Callstack exhausted but main module still not found. This will probably cause issues that the function/class cannot be imported.'
)
if (f'{cls_or_func.__module__}.{cls_or_func.__name__}' ==
'torch.nn.modules.rnn.LSTM'):
module_name = cls_or_func.__module__
return module_name
def reset_uid(namespace: 'str'='default') ->None:
_last_uid[namespace] = 0
def _create_wrapper_cls(cls, store_init_parameters=True, reset_mutation_uid
=False, stop_parsing=True):
class wrapper(cls):
def __init__(self, *args, **kwargs):
self._stop_parsing = stop_parsing
if reset_mutation_uid:
reset_uid('mutation')
if store_init_parameters:
argname_list = list(inspect.signature(cls.__init__).
parameters.keys())[1:]
full_args = {}
full_args.update(kwargs)
assert len(args) <= len(argname_list
), f'Length of {args} is greater than length of {argname_list}.'
for argname, value in zip(argname_list, args):
full_args[argname] = value
args = list(args)
for i, value in enumerate(args):
if isinstance(value, Translatable):
args[i] = value._translate()
for i, value in kwargs.items():
if isinstance(value, Translatable):
kwargs[i] = value._translate()
self._init_parameters = full_args
else:
self._init_parameters = {}
super().__init__(*args, **kwargs)
wrapper.__module__ = get_module_name(cls)
wrapper.__name__ = cls.__name__
wrapper.__qualname__ = cls.__qualname__
wrapper.__init__.__doc__ = cls.__init__.__doc__
return wrapper
def serialize_cls(cls):
"""
    To create a serializable class.
"""
return _create_wrapper_cls(cls)
def basic_unit(cls):
"""
To wrap a module as a basic unit, to stop it from parsing and make it mutate-able.
"""
import torch.nn as nn
assert issubclass(cls, nn.Module
), 'When using @basic_unit, the class must be a subclass of nn.Module.'
return serialize_cls(cls)
class Translatable(abc.ABC):
"""
Inherit this class and implement ``translate`` when the inner class needs a different
parameter from the wrapper class in its init function.
"""
@abc.abstractmethod
def _translate(self) ->Any:
pass
@basic_unit
class BinaryMin(nn.Module):
def forward(self, x):
return torch.min(x[0], x[1])
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import abc
import inspect
import warnings
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import Any
from typing import *
from pathlib import Path  # needed by get_module_name below
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_minimum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp2 = triton_helpers.minimum(tmp0, tmp1)
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_minimum_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
def get_module_name(cls_or_func):
module_name = cls_or_func.__module__
if module_name == '__main__':
for frm in inspect.stack():
if inspect.getmodule(frm[0]).__name__ == '__main__':
main_file_path = Path(inspect.getsourcefile(frm[0]))
if not Path().samefile(main_file_path.parent):
raise RuntimeError(
f'You are using "{main_file_path}" to launch your experiment, please launch the experiment under the directory where "{main_file_path.name}" is located.'
)
module_name = main_file_path.stem
break
if module_name == '__main__':
warnings.warn(
'Callstack exhausted but main module still not found. This will probably cause issues that the function/class cannot be imported.'
)
if (f'{cls_or_func.__module__}.{cls_or_func.__name__}' ==
'torch.nn.modules.rnn.LSTM'):
module_name = cls_or_func.__module__
return module_name
def reset_uid(namespace: 'str'='default') ->None:
_last_uid[namespace] = 0
def _create_wrapper_cls(cls, store_init_parameters=True, reset_mutation_uid
=False, stop_parsing=True):
class wrapper(cls):
def __init__(self, *args, **kwargs):
self._stop_parsing = stop_parsing
if reset_mutation_uid:
reset_uid('mutation')
if store_init_parameters:
argname_list = list(inspect.signature(cls.__init__).
parameters.keys())[1:]
full_args = {}
full_args.update(kwargs)
assert len(args) <= len(argname_list
), f'Length of {args} is greater than length of {argname_list}.'
for argname, value in zip(argname_list, args):
full_args[argname] = value
args = list(args)
for i, value in enumerate(args):
if isinstance(value, Translatable):
args[i] = value._translate()
for i, value in kwargs.items():
if isinstance(value, Translatable):
kwargs[i] = value._translate()
self._init_parameters = full_args
else:
self._init_parameters = {}
super().__init__(*args, **kwargs)
wrapper.__module__ = get_module_name(cls)
wrapper.__name__ = cls.__name__
wrapper.__qualname__ = cls.__qualname__
wrapper.__init__.__doc__ = cls.__init__.__doc__
return wrapper
def serialize_cls(cls):
"""
    To create a serializable class.
"""
return _create_wrapper_cls(cls)
def basic_unit(cls):
"""
To wrap a module as a basic unit, to stop it from parsing and make it mutate-able.
"""
import torch.nn as nn
assert issubclass(cls, nn.Module
), 'When using @basic_unit, the class must be a subclass of nn.Module.'
return serialize_cls(cls)
class Translatable(abc.ABC):
"""
Inherit this class and implement ``translate`` when the inner class needs a different
parameter from the wrapper class in its init function.
"""
@abc.abstractmethod
def _translate(self) ->Any:
pass
@basic_unit
class BinaryMinNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Johnsonms/NNI_master
|
BinaryMin
| false
| 11,572
|
[
"MIT"
] | 0
|
e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
https://github.com/Johnsonms/NNI_master/tree/e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
BinaryParamAdd
|
import abc
import inspect
import torch
import warnings
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import Any
from typing import *
from pathlib import Path  # needed by get_module_name below
def get_module_name(cls_or_func):
module_name = cls_or_func.__module__
if module_name == '__main__':
for frm in inspect.stack():
if inspect.getmodule(frm[0]).__name__ == '__main__':
main_file_path = Path(inspect.getsourcefile(frm[0]))
if not Path().samefile(main_file_path.parent):
raise RuntimeError(
f'You are using "{main_file_path}" to launch your experiment, please launch the experiment under the directory where "{main_file_path.name}" is located.'
)
module_name = main_file_path.stem
break
if module_name == '__main__':
warnings.warn(
'Callstack exhausted but main module still not found. This will probably cause issues that the function/class cannot be imported.'
)
if (f'{cls_or_func.__module__}.{cls_or_func.__name__}' ==
'torch.nn.modules.rnn.LSTM'):
module_name = cls_or_func.__module__
return module_name
def reset_uid(namespace: 'str'='default') ->None:
_last_uid[namespace] = 0
def _create_wrapper_cls(cls, store_init_parameters=True, reset_mutation_uid
=False, stop_parsing=True):
class wrapper(cls):
def __init__(self, *args, **kwargs):
self._stop_parsing = stop_parsing
if reset_mutation_uid:
reset_uid('mutation')
if store_init_parameters:
argname_list = list(inspect.signature(cls.__init__).
parameters.keys())[1:]
full_args = {}
full_args.update(kwargs)
assert len(args) <= len(argname_list
), f'Length of {args} is greater than length of {argname_list}.'
for argname, value in zip(argname_list, args):
full_args[argname] = value
args = list(args)
for i, value in enumerate(args):
if isinstance(value, Translatable):
args[i] = value._translate()
for i, value in kwargs.items():
if isinstance(value, Translatable):
kwargs[i] = value._translate()
self._init_parameters = full_args
else:
self._init_parameters = {}
super().__init__(*args, **kwargs)
wrapper.__module__ = get_module_name(cls)
wrapper.__name__ = cls.__name__
wrapper.__qualname__ = cls.__qualname__
wrapper.__init__.__doc__ = cls.__init__.__doc__
return wrapper
def serialize_cls(cls):
"""
    To create a serializable class.
"""
return _create_wrapper_cls(cls)
def basic_unit(cls):
"""
To wrap a module as a basic unit, to stop it from parsing and make it mutate-able.
"""
import torch.nn as nn
assert issubclass(cls, nn.Module
), 'When using @basic_unit, the class must be a subclass of nn.Module.'
return serialize_cls(cls)
class Translatable(abc.ABC):
"""
Inherit this class and implement ``translate`` when the inner class needs a different
parameter from the wrapper class in its init function.
"""
@abc.abstractmethod
def _translate(self) ->Any:
pass
@basic_unit
class BinaryParamAdd(nn.Module):
def __init__(self):
super().__init__()
self.beta = torch.nn.Parameter(torch.tensor(1, dtype=torch.float32))
def forward(self, x):
return self.beta * x[0] + (1 - self.beta) * x[1]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
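# Hedged usage sketch (added for illustration; not part of the original record).
# beta is a learnable scalar blend weight initialised to 1.0, so a fresh module
# returns x[0] unchanged; training can move the blend towards x[1].
if __name__ == '__main__':
    blend = BinaryParamAdd()
    x = torch.rand(4, 4, 4, 4)
    out = blend(x)
    assert out.shape == (4, 4, 4)
    assert torch.allclose(out, x[0])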
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import abc
import inspect
import warnings
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import Any
from typing import *
from pathlib import Path  # needed by get_module_name below
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_rsub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp6 = tl.load(in_ptr1 + (64 + x0), xmask)
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = tmp4 - tmp1
tmp7 = tmp5 * tmp6
tmp8 = tmp3 + tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_rsub_0[grid(64)](primals_1, primals_2,
buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_1
return buf0, reinterpret_tensor(primals_2, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_2, (4, 4, 4), (16, 4, 1), 64)
def get_module_name(cls_or_func):
module_name = cls_or_func.__module__
if module_name == '__main__':
for frm in inspect.stack():
if inspect.getmodule(frm[0]).__name__ == '__main__':
main_file_path = Path(inspect.getsourcefile(frm[0]))
if not Path().samefile(main_file_path.parent):
raise RuntimeError(
f'You are using "{main_file_path}" to launch your experiment, please launch the experiment under the directory where "{main_file_path.name}" is located.'
)
module_name = main_file_path.stem
break
if module_name == '__main__':
warnings.warn(
'Callstack exhausted but main module still not found. This will probably cause issues that the function/class cannot be imported.'
)
if (f'{cls_or_func.__module__}.{cls_or_func.__name__}' ==
'torch.nn.modules.rnn.LSTM'):
module_name = cls_or_func.__module__
return module_name
def reset_uid(namespace: 'str'='default') ->None:
_last_uid[namespace] = 0
def _create_wrapper_cls(cls, store_init_parameters=True, reset_mutation_uid
=False, stop_parsing=True):
class wrapper(cls):
def __init__(self, *args, **kwargs):
self._stop_parsing = stop_parsing
if reset_mutation_uid:
reset_uid('mutation')
if store_init_parameters:
argname_list = list(inspect.signature(cls.__init__).
parameters.keys())[1:]
full_args = {}
full_args.update(kwargs)
assert len(args) <= len(argname_list
), f'Length of {args} is greater than length of {argname_list}.'
for argname, value in zip(argname_list, args):
full_args[argname] = value
args = list(args)
for i, value in enumerate(args):
if isinstance(value, Translatable):
args[i] = value._translate()
for i, value in kwargs.items():
if isinstance(value, Translatable):
kwargs[i] = value._translate()
self._init_parameters = full_args
else:
self._init_parameters = {}
super().__init__(*args, **kwargs)
wrapper.__module__ = get_module_name(cls)
wrapper.__name__ = cls.__name__
wrapper.__qualname__ = cls.__qualname__
wrapper.__init__.__doc__ = cls.__init__.__doc__
return wrapper
def serialize_cls(cls):
"""
    To create a serializable class.
"""
return _create_wrapper_cls(cls)
def basic_unit(cls):
"""
To wrap a module as a basic unit, to stop it from parsing and make it mutate-able.
"""
import torch.nn as nn
assert issubclass(cls, nn.Module
), 'When using @basic_unit, the class must be a subclass of nn.Module.'
return serialize_cls(cls)
class Translatable(abc.ABC):
"""
Inherit this class and implement ``translate`` when the inner class needs a different
parameter from the wrapper class in its init function.
"""
@abc.abstractmethod
def _translate(self) ->Any:
pass
@basic_unit
class BinaryParamAddNew(nn.Module):
def __init__(self):
super().__init__()
self.beta = torch.nn.Parameter(torch.tensor(1, dtype=torch.float32))
def forward(self, input_0):
primals_1 = self.beta
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
Johnsonms/NNI_master
|
BinaryParamAdd
| false
| 11,573
|
[
"MIT"
] | 0
|
e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
https://github.com/Johnsonms/NNI_master/tree/e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
BinaryMax
|
import abc
import inspect
import torch
import warnings
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import Any
from typing import *
from pathlib import Path  # needed by get_module_name below
def get_module_name(cls_or_func):
module_name = cls_or_func.__module__
if module_name == '__main__':
for frm in inspect.stack():
if inspect.getmodule(frm[0]).__name__ == '__main__':
main_file_path = Path(inspect.getsourcefile(frm[0]))
if not Path().samefile(main_file_path.parent):
raise RuntimeError(
f'You are using "{main_file_path}" to launch your experiment, please launch the experiment under the directory where "{main_file_path.name}" is located.'
)
module_name = main_file_path.stem
break
if module_name == '__main__':
warnings.warn(
'Callstack exhausted but main module still not found. This will probably cause issues that the function/class cannot be imported.'
)
if (f'{cls_or_func.__module__}.{cls_or_func.__name__}' ==
'torch.nn.modules.rnn.LSTM'):
module_name = cls_or_func.__module__
return module_name
def reset_uid(namespace: 'str'='default') ->None:
_last_uid[namespace] = 0
def _create_wrapper_cls(cls, store_init_parameters=True, reset_mutation_uid
=False, stop_parsing=True):
class wrapper(cls):
def __init__(self, *args, **kwargs):
self._stop_parsing = stop_parsing
if reset_mutation_uid:
reset_uid('mutation')
if store_init_parameters:
argname_list = list(inspect.signature(cls.__init__).
parameters.keys())[1:]
full_args = {}
full_args.update(kwargs)
assert len(args) <= len(argname_list
), f'Length of {args} is greater than length of {argname_list}.'
for argname, value in zip(argname_list, args):
full_args[argname] = value
args = list(args)
for i, value in enumerate(args):
if isinstance(value, Translatable):
args[i] = value._translate()
for i, value in kwargs.items():
if isinstance(value, Translatable):
kwargs[i] = value._translate()
self._init_parameters = full_args
else:
self._init_parameters = {}
super().__init__(*args, **kwargs)
wrapper.__module__ = get_module_name(cls)
wrapper.__name__ = cls.__name__
wrapper.__qualname__ = cls.__qualname__
wrapper.__init__.__doc__ = cls.__init__.__doc__
return wrapper
def serialize_cls(cls):
"""
    To create a serializable class.
"""
return _create_wrapper_cls(cls)
def basic_unit(cls):
"""
To wrap a module as a basic unit, to stop it from parsing and make it mutate-able.
"""
import torch.nn as nn
assert issubclass(cls, nn.Module
), 'When using @basic_unit, the class must be a subclass of nn.Module.'
return serialize_cls(cls)
class Translatable(abc.ABC):
"""
Inherit this class and implement ``translate`` when the inner class needs a different
parameter from the wrapper class in its init function.
"""
@abc.abstractmethod
def _translate(self) ->Any:
pass
@basic_unit
class BinaryMax(nn.Module):
def forward(self, x):
return torch.max(x[0], x[1])
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import abc
import inspect
import warnings
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import Any
from typing import *
from pathlib import Path  # needed by get_module_name below
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_maximum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_maximum_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
def get_module_name(cls_or_func):
module_name = cls_or_func.__module__
if module_name == '__main__':
for frm in inspect.stack():
if inspect.getmodule(frm[0]).__name__ == '__main__':
main_file_path = Path(inspect.getsourcefile(frm[0]))
if not Path().samefile(main_file_path.parent):
raise RuntimeError(
f'You are using "{main_file_path}" to launch your experiment, please launch the experiment under the directory where "{main_file_path.name}" is located.'
)
module_name = main_file_path.stem
break
if module_name == '__main__':
warnings.warn(
'Callstack exhausted but main module still not found. This will probably cause issues that the function/class cannot be imported.'
)
if (f'{cls_or_func.__module__}.{cls_or_func.__name__}' ==
'torch.nn.modules.rnn.LSTM'):
module_name = cls_or_func.__module__
return module_name
def reset_uid(namespace: 'str'='default') ->None:
_last_uid[namespace] = 0
def _create_wrapper_cls(cls, store_init_parameters=True, reset_mutation_uid
=False, stop_parsing=True):
class wrapper(cls):
def __init__(self, *args, **kwargs):
self._stop_parsing = stop_parsing
if reset_mutation_uid:
reset_uid('mutation')
if store_init_parameters:
argname_list = list(inspect.signature(cls.__init__).
parameters.keys())[1:]
full_args = {}
full_args.update(kwargs)
assert len(args) <= len(argname_list
), f'Length of {args} is greater than length of {argname_list}.'
for argname, value in zip(argname_list, args):
full_args[argname] = value
args = list(args)
for i, value in enumerate(args):
if isinstance(value, Translatable):
args[i] = value._translate()
for i, value in kwargs.items():
if isinstance(value, Translatable):
kwargs[i] = value._translate()
self._init_parameters = full_args
else:
self._init_parameters = {}
super().__init__(*args, **kwargs)
wrapper.__module__ = get_module_name(cls)
wrapper.__name__ = cls.__name__
wrapper.__qualname__ = cls.__qualname__
wrapper.__init__.__doc__ = cls.__init__.__doc__
return wrapper
def serialize_cls(cls):
"""
    To create a serializable class.
"""
return _create_wrapper_cls(cls)
def basic_unit(cls):
"""
To wrap a module as a basic unit, to stop it from parsing and make it mutate-able.
"""
import torch.nn as nn
assert issubclass(cls, nn.Module
), 'When using @basic_unit, the class must be a subclass of nn.Module.'
return serialize_cls(cls)
class Translatable(abc.ABC):
"""
Inherit this class and implement ``translate`` when the inner class needs a different
parameter from the wrapper class in its init function.
"""
@abc.abstractmethod
def _translate(self) ->Any:
pass
@basic_unit
class BinaryMaxNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Johnsonms/NNI_master
|
BinaryMax
| false
| 11,574
|
[
"MIT"
] | 0
|
e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
https://github.com/Johnsonms/NNI_master/tree/e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
DistillKL
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
class DistillKL(nn.Module):
"""Distilling the Knowledge in a Neural Network"""
def __init__(self, T):
super(DistillKL, self).__init__()
self.T = T
def forward(self, y_s, y_t):
p_s = F.log_softmax(y_s / self.T, dim=1)
p_t = F.softmax(y_t / self.T, dim=1)
loss = F.kl_div(p_s, p_t, size_average=False
) * self.T ** 2 / y_s.shape[0]
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'T': 4}]
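# Hedged usage sketch (added for illustration; not part of the original record).
# The temperature T softens both distributions before the KL term; the result is
# rescaled by T ** 2 and divided by the leading (batch) dimension of y_s.
if __name__ == '__main__':
    criterion = DistillKL(T=4)
    student_logits = torch.rand(4, 4, 4, 4)
    teacher_logits = torch.rand(4, 4, 4, 4)
    loss = criterion(student_logits, teacher_logits)
    assert loss.dim() == 0 and loss.item() >= 0.0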
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x3, tmp17, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + x3, tmp16, xmask)
@triton.jit
def triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr1 + r3, None)
tmp18 = tl.load(in_ptr1 + (r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr1 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr1 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp26 = tl.load(in_ptr1 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = libdevice.isnan(tmp8).to(tl.int1)
tmp10 = 0.0
tmp11 = tmp8 == tmp10
tmp12 = tl_math.log(tmp8)
tmp13 = tmp8 * tmp12
tmp14 = tl.where(tmp11, tmp10, tmp13)
tmp15 = float('nan')
tmp16 = tl.where(tmp9, tmp15, tmp14)
tmp19 = tl_math.exp(tmp18)
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tl_math.log(tmp28)
tmp30 = tmp17 - tmp29
tmp31 = tmp8 * tmp30
tmp32 = tmp16 - tmp31
tmp33 = tl.broadcast_to(tmp32, [RBLOCK])
tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0))
tmp36 = 16.0
tmp37 = tmp35 * tmp36
tmp38 = 0.25
tmp39 = tmp37 * tmp38
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp39, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_1[grid(256)](arg0_1, buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2[grid(1)
](buf4, buf0, buf2, 1, 256, num_warps=2, num_stages=1)
del buf0
del buf2
return buf4,
class DistillKLNew(nn.Module):
"""Distilling the Knowledge in a Neural Network"""
def __init__(self, T):
super(DistillKLNew, self).__init__()
self.T = T
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Johnsonms/NNI_master
|
DistillKL
| false
| 11,575
|
[
"MIT"
] | 0
|
e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
https://github.com/Johnsonms/NNI_master/tree/e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
PatchSequential
|
import math
import torch
import warnings
from typing import Dict
from typing import Optional
from typing import Tuple
import torch.nn as nn
import torch.nn.functional as F
from typing import cast
from typing import List
from typing import Union
from torch.distributions import Bernoulli
from itertools import zip_longest
from collections import OrderedDict
from typing import Any
from typing import Iterator
from typing import NamedTuple
from torch.nn.modules.utils import _pair
from math import pi
def _adapted_sampling(shape: 'Union[Tuple, torch.Size]', dist:
'torch.distributions.Distribution', same_on_batch=False) ->torch.Tensor:
"""The uniform sampling function that accepts 'same_on_batch'.
    If same_on_batch is True, all values generated will be exactly the same given a batch_size (shape[0]).
By default, same_on_batch is set to False.
"""
if same_on_batch:
return dist.sample((1, *shape[1:])).repeat(shape[0], *([1] * (len(
shape) - 1)))
return dist.sample(shape)
def _transform_output_shape(output:
'Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]', shape: 'Tuple'
) ->Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
"""Collapse the broadcasted batch dimensions an input tensor to be the specified shape.
Args:
input: torch.Tensor
shape: List/tuple of int
Returns:
torch.Tensor
"""
is_tuple = isinstance(output, tuple)
out_tensor: 'torch.Tensor'
trans_matrix: 'Optional[torch.Tensor]'
if is_tuple:
out_tensor, trans_matrix = cast(Tuple[torch.Tensor, torch.Tensor],
output)
else:
out_tensor = cast(torch.Tensor, output)
trans_matrix = None
if trans_matrix is not None:
if len(out_tensor.shape) > len(shape):
assert trans_matrix.shape[0
] == 1, f'Dimension 0 of transformation matrix is expected to be 1, got {trans_matrix.shape[0]}'
trans_matrix = trans_matrix.squeeze(0)
for dim in range(len(out_tensor.shape) - len(shape)):
assert out_tensor.shape[0
] == 1, f'Dimension {dim} of input is expected to be 1, got {out_tensor.shape[0]}'
out_tensor = out_tensor.squeeze(0)
return (out_tensor, trans_matrix) if is_tuple else out_tensor
def _transform_input(input: 'torch.Tensor') ->torch.Tensor:
"""Reshape an input tensor to be (*, C, H, W). Accept either (H, W), (C, H, W) or (*, C, H, W).
Args:
input: torch.Tensor
Returns:
torch.Tensor
"""
if not torch.is_tensor(input):
raise TypeError(f'Input type is not a torch.Tensor. Got {type(input)}')
if len(input.shape) not in [2, 3, 4]:
raise ValueError(
f'Input size must have a shape of either (H, W), (C, H, W) or (*, C, H, W). Got {input.shape}'
)
if len(input.shape) == 2:
input = input.unsqueeze(0)
if len(input.shape) == 3:
input = input.unsqueeze(0)
return input
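# Hedged doctest-style examples (added for illustration): lower-rank inputs gain
# leading singleton dimensions until the tensor is (*, C, H, W).
#
#     _transform_input(torch.rand(5, 6)).shape        # torch.Size([1, 1, 5, 6])
#     _transform_input(torch.rand(3, 5, 6)).shape     # torch.Size([1, 3, 5, 6])
#     _transform_input(torch.rand(2, 3, 5, 6)).shape  # torch.Size([2, 3, 5, 6])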
def _validate_input_dtype(input: 'torch.Tensor', accepted_dtypes: 'List'
) ->None:
"""Check if the dtype of the input tensor is in the range of accepted_dtypes
Args:
input: torch.Tensor
accepted_dtypes: List. e.g. [torch.float32, torch.float64]
"""
if input.dtype not in accepted_dtypes:
raise TypeError(
f'Expected input of {accepted_dtypes}. Got {input.dtype}')
def _extract_device_dtype(tensor_list: 'List[Optional[Any]]') ->Tuple[torch
.device, torch.dtype]:
"""Check if all the input are in the same device (only if when they are torch.Tensor).
If so, it would return a tuple of (device, dtype). Default: (cpu, ``get_default_dtype()``).
Returns:
[torch.device, torch.dtype]
"""
device, dtype = None, None
for tensor in tensor_list:
if tensor is not None:
if not isinstance(tensor, (torch.Tensor,)):
continue
_device = tensor.device
_dtype = tensor.dtype
if device is None and dtype is None:
device = _device
dtype = _dtype
elif device != _device or dtype != _dtype:
raise ValueError(
                    f'Passed values are not on the same device and dtype. Got ({device}, {dtype}) and ({_device}, {_dtype}).'
)
if device is None:
device = torch.device('cpu')
if dtype is None:
dtype = torch.get_default_dtype()
return device, dtype
def _joint_range_check(ranged_factor: 'torch.Tensor', name: 'str', bounds:
'Optional[Tuple[float, float]]'=None) ->None:
"""check if bounds[0] <= ranged_factor[0] <= ranged_factor[1] <= bounds[1]"""
if bounds is None:
bounds = float('-inf'), float('inf')
if ranged_factor.dim() == 1 and len(ranged_factor) == 2:
if not bounds[0] <= ranged_factor[0] or not bounds[1] >= ranged_factor[
1]:
raise ValueError(
f'{name} out of bounds. Expected inside {bounds}, got {ranged_factor}.'
)
if not bounds[0] <= ranged_factor[0] <= ranged_factor[1] <= bounds[1]:
raise ValueError(
f'{name}[0] should be smaller than {name}[1] got {ranged_factor}'
)
else:
raise TypeError(
            f'{name} should be a tensor with length 2 whose values are between {bounds}. Got {ranged_factor}.'
)
def _singular_range_check(ranged_factor: 'torch.Tensor', name: 'str',
bounds: 'Optional[Tuple[float, float]]'=None, skip_none: 'bool'=False,
mode: 'str'='2d') ->None:
"""check if bounds[0] <= ranged_factor[0] <= bounds[1] and bounds[0] <= ranged_factor[1] <= bounds[1]"""
if mode == '2d':
dim_size = 2
elif mode == '3d':
dim_size = 3
else:
raise ValueError(f"'mode' shall be either 2d or 3d. Got {mode}")
if skip_none and ranged_factor is None:
return
if bounds is None:
bounds = float('-inf'), float('inf')
if ranged_factor.dim() == 1 and len(ranged_factor) == dim_size:
for f in ranged_factor:
if not bounds[0] <= f <= bounds[1]:
raise ValueError(
f'{name} out of bounds. Expected inside {bounds}, got {ranged_factor}.'
)
else:
raise TypeError(
            f'{name} should be a float number or a tuple with length {dim_size} whose values are between {bounds}. Got {ranged_factor}.'
)
def _range_bound(factor:
'Union[torch.Tensor, float, Tuple[float, float], List[float]]', name:
'str', center: 'float'=0.0, bounds: 'Tuple[float, float]'=(0, float(
'inf')), check: 'Optional[str]'='joint', device: 'torch.device'=torch.
device('cpu'), dtype: 'torch.dtype'=torch.get_default_dtype()
) ->torch.Tensor:
"""Check inputs and compute the corresponding factor bounds"""
if not isinstance(factor, torch.Tensor):
factor = torch.tensor(factor, device=device, dtype=dtype)
factor_bound: 'torch.Tensor'
if factor.dim() == 0:
if factor < 0:
raise ValueError(
            f'If {name} is a single number, it must be non-negative. Got {factor}.'
)
factor_bound = factor.repeat(2) * torch.tensor([-1.0, 1.0], device=
factor.device, dtype=factor.dtype) + center
factor_bound = factor_bound.clamp(bounds[0], bounds[1])
else:
factor_bound = torch.as_tensor(factor, device=device, dtype=dtype)
if check is not None:
if check == 'joint':
_joint_range_check(factor_bound, name, bounds)
elif check == 'singular':
_singular_range_check(factor_bound, name, bounds)
else:
raise NotImplementedError(f"methods '{check}' not implemented.")
return factor_bound
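# Illustrative sketch (not part of the original module): a scalar factor is expanded
# symmetrically around `center` and clamped to `bounds`, while an explicit pair is only
# validated. The function below is a hypothetical brightness-style example.
def _demo_range_bound() -> None:
    """Hypothetical example: scalar 0.1 around center 1.0 becomes the pair [0.9, 1.1]."""
    pair = _range_bound(0.1, 'brightness', center=1.0, bounds=(0.0, 2.0))
    assert torch.allclose(pair, torch.tensor([0.9, 1.1]))
    # An explicit pair is checked against the bounds but not re-centered.
    pair = _range_bound((0.8, 1.2), 'brightness', center=1.0, bounds=(0.0, 2.0))
    assert torch.allclose(pair, torch.tensor([0.8, 1.2]))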
def adjust_brightness(input: 'torch.Tensor', brightness_factor:
'Union[float, torch.Tensor]') ->torch.Tensor:
"""Adjust Brightness of an image.
.. image:: _static/img/adjust_brightness.png
    This implementation aligns with OpenCV, not PIL. Hence, the output differs from TorchVision.
The input image is expected to be in the range of [0, 1].
Args:
input: image to be adjusted in the shape of :math:`(*, N)`.
        brightness_factor: Brightness adjust factor per element
            in the batch. 0 does not modify the input image, while any other number modifies
            the brightness.
Return:
Adjusted image in the shape of :math:`(*, N)`.
Example:
>>> x = torch.ones(1, 1, 2, 2)
>>> adjust_brightness(x, 1.)
tensor([[[[1., 1.],
[1., 1.]]]])
>>> x = torch.ones(2, 5, 3, 3)
>>> y = torch.tensor([0.25, 0.50])
>>> adjust_brightness(x, y).shape
torch.Size([2, 5, 3, 3])
"""
if not isinstance(input, torch.Tensor):
raise TypeError(f'Input type is not a torch.Tensor. Got {type(input)}')
if not isinstance(brightness_factor, (float, torch.Tensor)):
raise TypeError(
f'The factor should be either a float or torch.Tensor. Got {type(brightness_factor)}'
)
if isinstance(brightness_factor, float):
brightness_factor = torch.tensor([brightness_factor])
brightness_factor = brightness_factor.to(input.device)
for _ in input.shape[1:]:
brightness_factor = torch.unsqueeze(brightness_factor, dim=-1)
x_adjust: 'torch.Tensor' = input + brightness_factor
out: 'torch.Tensor' = torch.clamp(x_adjust, 0.0, 1.0)
return out
def adjust_contrast(input: 'torch.Tensor', contrast_factor:
'Union[float, torch.Tensor]') ->torch.Tensor:
"""Adjust Contrast of an image.
.. image:: _static/img/adjust_contrast.png
    This implementation aligns with OpenCV, not PIL. Hence, the output differs from TorchVision.
The input image is expected to be in the range of [0, 1].
Args:
input: Image to be adjusted in the shape of :math:`(*, N)`.
        contrast_factor: Contrast adjust factor per element
            in the batch. 0 generates a completely black image, 1 does not modify
            the input image, while any other non-negative number scales the
            contrast by this factor.
Return:
Adjusted image in the shape of :math:`(*, N)`.
Example:
>>> x = torch.ones(1, 1, 2, 2)
>>> adjust_contrast(x, 0.5)
tensor([[[[0.5000, 0.5000],
[0.5000, 0.5000]]]])
>>> x = torch.ones(2, 5, 3, 3)
>>> y = torch.tensor([0.65, 0.50])
>>> adjust_contrast(x, y).shape
torch.Size([2, 5, 3, 3])
"""
if not isinstance(input, torch.Tensor):
raise TypeError(f'Input type is not a torch.Tensor. Got {type(input)}')
if not isinstance(contrast_factor, (float, torch.Tensor)):
raise TypeError(
f'The factor should be either a float or torch.Tensor. Got {type(contrast_factor)}'
)
if isinstance(contrast_factor, float):
contrast_factor = torch.tensor([contrast_factor])
contrast_factor = contrast_factor.to(input.device)
if (contrast_factor < 0).any():
raise ValueError(
f'Contrast factor must be non-negative. Got {contrast_factor}')
for _ in input.shape[1:]:
contrast_factor = torch.unsqueeze(contrast_factor, dim=-1)
x_adjust: 'torch.Tensor' = input * contrast_factor
out: 'torch.Tensor' = torch.clamp(x_adjust, 0.0, 1.0)
return out
def adjust_hue_raw(input: 'torch.Tensor', hue_factor:
'Union[float, torch.Tensor]') ->torch.Tensor:
"""Adjust hue of an image. Expecting input to be in hsv format already."""
if not isinstance(input, torch.Tensor):
raise TypeError(f'Input type is not a torch.Tensor. Got {type(input)}')
if not isinstance(hue_factor, (float, torch.Tensor)):
raise TypeError(
f'The hue_factor should be a float number or torch.Tensor in the range between [-PI, PI]. Got {type(hue_factor)}'
)
if isinstance(hue_factor, float):
hue_factor = torch.as_tensor(hue_factor)
    hue_factor = hue_factor.to(input.device)
for _ in input.shape[1:]:
hue_factor = torch.unsqueeze(hue_factor, dim=-1)
h, s, v = torch.chunk(input, chunks=3, dim=-3)
divisor: 'float' = 2 * pi
h_out: 'torch.Tensor' = torch.fmod(h + hue_factor, divisor)
out: 'torch.Tensor' = torch.cat([h_out, s, v], dim=-3)
return out
def hsv_to_rgb(image: 'torch.Tensor') ->torch.Tensor:
"""Convert an image from HSV to RGB.
The H channel values are assumed to be in the range 0..2pi. S and V are in the range 0..1.
Args:
image: HSV Image to be converted to HSV with shape of :math:`(*, 3, H, W)`.
Returns:
RGB version of the image with shape of :math:`(*, 3, H, W)`.
Example:
>>> input = torch.rand(2, 3, 4, 5)
>>> output = hsv_to_rgb(input) # 2x3x4x5
"""
if not isinstance(image, torch.Tensor):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(
type(image)))
if len(image.shape) < 3 or image.shape[-3] != 3:
raise ValueError('Input size must have a shape of (*, 3, H, W). Got {}'
.format(image.shape))
h: 'torch.Tensor' = image[..., 0, :, :] / (2 * math.pi)
s: 'torch.Tensor' = image[..., 1, :, :]
v: 'torch.Tensor' = image[..., 2, :, :]
hi: 'torch.Tensor' = torch.floor(h * 6) % 6
f: 'torch.Tensor' = h * 6 % 6 - hi
one: 'torch.Tensor' = torch.tensor(1.0).to(image.device)
p: 'torch.Tensor' = v * (one - s)
q: 'torch.Tensor' = v * (one - f * s)
t: 'torch.Tensor' = v * (one - (one - f) * s)
hi = hi.long()
indices: 'torch.Tensor' = torch.stack([hi, hi + 6, hi + 12], dim=-3)
out = torch.stack((v, q, p, p, t, v, t, v, v, q, p, p, p, p, t, v, v, q
), dim=-3)
out = torch.gather(out, -3, indices)
return out
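# Note on the gather trick above (explanatory comment, not from the original source): for
# every pixel, `hi` selects one of the six hue sextants (0..5). The 18-channel stack lays
# out the red, green and blue candidates of all six sextants, so gathering at `hi`,
# `hi + 6` and `hi + 12` picks the (r, g, b) triplet of the active sextant in a single
# torch.gather call instead of six masked writes.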
def rgb_to_hsv(image: 'torch.Tensor', eps: 'float'=1e-06) ->torch.Tensor:
"""Convert an image from RGB to HSV.
.. image:: _static/img/rgb_to_hsv.png
The image data is assumed to be in the range of (0, 1).
Args:
image: RGB Image to be converted to HSV with shape of :math:`(*, 3, H, W)`.
        eps: scalar to enforce numerical stability.
Returns:
HSV version of the image with shape of :math:`(*, 3, H, W)`.
The H channel values are in the range 0..2pi. S and V are in the range 0..1.
Example:
>>> input = torch.rand(2, 3, 4, 5)
>>> output = rgb_to_hsv(input) # 2x3x4x5
"""
if not isinstance(image, torch.Tensor):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(
type(image)))
if len(image.shape) < 3 or image.shape[-3] != 3:
raise ValueError('Input size must have a shape of (*, 3, H, W). Got {}'
.format(image.shape))
maxc, _ = image.max(-3)
maxc_mask = image == maxc.unsqueeze(-3)
_, max_indices = ((maxc_mask.cumsum(-3) == 1) & maxc_mask).max(-3)
minc: 'torch.Tensor' = image.min(-3)[0]
v: 'torch.Tensor' = maxc
deltac: 'torch.Tensor' = maxc - minc
s: 'torch.Tensor' = deltac / (v + eps)
deltac = torch.where(deltac == 0, torch.ones_like(deltac, device=deltac
.device, dtype=deltac.dtype), deltac)
maxc_tmp = maxc.unsqueeze(-3) - image
rc: 'torch.Tensor' = maxc_tmp[..., 0, :, :]
gc: 'torch.Tensor' = maxc_tmp[..., 1, :, :]
bc: 'torch.Tensor' = maxc_tmp[..., 2, :, :]
h = torch.stack([bc - gc, 2.0 * deltac + rc - bc, 4.0 * deltac + gc -
rc], dim=-3)
h = torch.gather(h, dim=-3, index=max_indices[..., None, :, :])
h = h.squeeze(-3)
h = h / deltac
h = h / 6.0 % 1.0
h = 2 * math.pi * h
return torch.stack([h, s, v], dim=-3)
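# Explanatory note (not from the original source): the `cumsum` trick above makes the
# argmax over channels deterministic when two channels tie for the maximum. Keeping only
# the first channel where the cumulative count of maxima equals one means `max_indices`
# always points at the first maximal channel, so the hue branch selected by torch.gather
# is unambiguous.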
def adjust_hue(input: 'torch.Tensor', hue_factor: 'Union[float, torch.Tensor]'
) ->torch.Tensor:
"""Adjust hue of an image.
.. image:: _static/img/adjust_hue.png
The input image is expected to be an RGB image in the range of [0, 1].
Args:
input: Image to be adjusted in the shape of :math:`(*, 3, H, W)`.
hue_factor: How much to shift the hue channel. Should be in [-PI, PI]. PI
and -PI give complete reversal of hue channel in HSV space in positive and negative
direction respectively. 0 means no shift. Therefore, both -PI and PI will give an
image with complementary colors while 0 gives the original image.
Return:
Adjusted image in the shape of :math:`(*, 3, H, W)`.
Example:
>>> x = torch.ones(1, 3, 2, 2)
>>> adjust_hue(x, 3.141516).shape
torch.Size([1, 3, 2, 2])
>>> x = torch.ones(2, 3, 3, 3)
>>> y = torch.ones(2) * 3.141516
>>> adjust_hue(x, y).shape
torch.Size([2, 3, 3, 3])
"""
x_hsv: 'torch.Tensor' = rgb_to_hsv(input)
x_adjusted: 'torch.Tensor' = adjust_hue_raw(x_hsv, hue_factor)
out: 'torch.Tensor' = hsv_to_rgb(x_adjusted)
return out
def adjust_saturation_raw(input: 'torch.Tensor', saturation_factor:
'Union[float, torch.Tensor]') ->torch.Tensor:
"""Adjust color saturation of an image. Expecting input to be in hsv format already."""
if not isinstance(input, torch.Tensor):
raise TypeError(f'Input type is not a torch.Tensor. Got {type(input)}')
if not isinstance(saturation_factor, (float, torch.Tensor)):
raise TypeError(
            f'The saturation_factor should be a float number or torch.Tensor. Got {type(saturation_factor)}'
)
if isinstance(saturation_factor, float):
saturation_factor = torch.as_tensor(saturation_factor)
saturation_factor = saturation_factor.to(input.device)
for _ in input.shape[1:]:
saturation_factor = torch.unsqueeze(saturation_factor, dim=-1)
h, s, v = torch.chunk(input, chunks=3, dim=-3)
s_out: 'torch.Tensor' = torch.clamp(s * saturation_factor, min=0, max=1)
out: 'torch.Tensor' = torch.cat([h, s_out, v], dim=-3)
return out
def adjust_saturation(input: 'torch.Tensor', saturation_factor:
'Union[float, torch.Tensor]') ->torch.Tensor:
"""Adjust color saturation of an image.
.. image:: _static/img/adjust_saturation.png
The input image is expected to be an RGB image in the range of [0, 1].
Args:
input: Image/Tensor to be adjusted in the shape of :math:`(*, 3, H, W)`.
saturation_factor: How much to adjust the saturation. 0 will give a black
and white image, 1 will give the original image while 2 will enhance the saturation by a factor of 2.
Return:
Adjusted image in the shape of :math:`(*, 3, H, W)`.
Example:
>>> x = torch.ones(1, 3, 3, 3)
>>> adjust_saturation(x, 2.).shape
torch.Size([1, 3, 3, 3])
>>> x = torch.ones(2, 3, 3, 3)
>>> y = torch.tensor([1., 2.])
>>> adjust_saturation(x, y).shape
torch.Size([2, 3, 3, 3])
"""
x_hsv: 'torch.Tensor' = rgb_to_hsv(input)
x_adjusted: 'torch.Tensor' = adjust_saturation_raw(x_hsv, saturation_factor
)
out: 'torch.Tensor' = hsv_to_rgb(x_adjusted)
return out
def _extract_tensor_patchesnd(input: 'torch.Tensor', window_sizes:
'Tuple[int, ...]', strides: 'Tuple[int, ...]') ->torch.Tensor:
batch_size, num_channels = input.size()[:2]
dims = range(2, input.dim())
for dim, patch_size, stride in zip(dims, window_sizes, strides):
input = input.unfold(dim, patch_size, stride)
input = input.permute(0, *dims, 1, *[(dim + len(dims)) for dim in dims]
).contiguous()
return input.view(batch_size, -1, num_channels, *window_sizes)
def extract_tensor_patches(input: 'torch.Tensor', window_size:
'Union[int, Tuple[int, int]]', stride: 'Union[int, Tuple[int, int]]'=1,
padding: 'Union[int, Tuple[int, int]]'=0) ->torch.Tensor:
"""Function that extract patches from tensors and stack them.
See :class:`~kornia.contrib.ExtractTensorPatches` for details.
"""
if not torch.is_tensor(input):
raise TypeError('Input input type is not a torch.Tensor. Got {}'.
format(type(input)))
if not len(input.shape) == 4:
raise ValueError('Invalid input shape, we expect BxCxHxW. Got: {}'.
format(input.shape))
if padding:
pad_vert, pad_horz = _pair(padding)
input = F.pad(input, [pad_horz, pad_horz, pad_vert, pad_vert])
return _extract_tensor_patchesnd(input, _pair(window_size), _pair(stride))
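# Illustrative sketch (not part of the original module): patches come back as
# (B, num_patches, C, window_h, window_w), enumerated row-major over the image. The
# function below is a hypothetical check mirroring the PatchSequential doctest.
def _demo_extract_tensor_patches() -> None:
    """Hypothetical example: a 4x4 image split into four non-overlapping 2x2 patches."""
    img = torch.arange(16.0).view(1, 1, 4, 4)
    patches = extract_tensor_patches(img, window_size=(2, 2), stride=(2, 2))
    assert patches.shape == (1, 4, 1, 2, 2)
    # The first patch is the top-left corner of the image.
    assert torch.equal(patches[0, 0, 0], torch.tensor([[0.0, 1.0], [4.0, 5.0]]))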
class _BasicAugmentationBase(nn.Module):
"""_BasicAugmentationBase base class for customized augmentation implementations.
Plain augmentation base class without the functionality of transformation matrix calculations.
    By default, the random computations will happen on CPU with ``torch.get_default_dtype()``.
To change this behaviour, please use ``set_rng_device_and_dtype``.
Args:
p (float): probability for applying an augmentation. This param controls the augmentation
probabilities element-wisely.
p_batch (float): probability for applying an augmentation to a batch. This param controls the augmentation
probabilities batch-wisely.
same_on_batch (bool): apply the same transformation across the batch. Default: False.
keepdim (bool): whether to keep the output shape the same as input (True) or broadcast it
to the batch form (False). Default: False.
"""
def __init__(self, p: 'float'=0.5, p_batch: 'float'=1.0, same_on_batch:
'bool'=False, keepdim: 'bool'=False) ->None:
super(_BasicAugmentationBase, self).__init__()
self.p = p
self.p_batch = p_batch
self.same_on_batch = same_on_batch
self.keepdim = keepdim
self._params: 'Dict[str, torch.Tensor]' = {}
        if p != 0.0 and p != 1.0:
            self._p_gen = Bernoulli(self.p)
        if p_batch != 0.0 and p_batch != 1.0:
            self._p_batch_gen = Bernoulli(self.p_batch)
self.set_rng_device_and_dtype(torch.device('cpu'), torch.
get_default_dtype())
def __repr__(self) ->str:
return (
f'p={self.p}, p_batch={self.p_batch}, same_on_batch={self.same_on_batch}'
)
def __unpack_input__(self, input: 'torch.Tensor') ->torch.Tensor:
return input
def __check_batching__(self, input:
'Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]'):
"""Check if a transformation matrix is returned,
it has to be in the same batching mode as output."""
raise NotImplementedError
def transform_tensor(self, input: 'torch.Tensor') ->torch.Tensor:
"""Standardize input tensors."""
raise NotImplementedError
def generate_parameters(self, batch_shape: 'torch.Size') ->Dict[str,
torch.Tensor]:
return {}
def apply_transform(self, input: 'torch.Tensor', params:
'Dict[str, torch.Tensor]') ->torch.Tensor:
raise NotImplementedError
def set_rng_device_and_dtype(self, device: 'torch.device', dtype:
'torch.dtype') ->None:
"""Change the random generation device and dtype.
Note:
The generated random numbers are not reproducible across different devices and dtypes.
"""
self.device = device
self.dtype = dtype
def __batch_prob_generator__(self, batch_shape: 'torch.Size', p:
'float', p_batch: 'float', same_on_batch: 'bool') ->torch.Tensor:
batch_prob: 'torch.Tensor'
if p_batch == 1:
batch_prob = torch.tensor([True])
elif p_batch == 0:
batch_prob = torch.tensor([False])
else:
batch_prob = _adapted_sampling((1,), self._p_batch_gen,
same_on_batch).bool()
if batch_prob.sum().item() == 1:
elem_prob: 'torch.Tensor'
if p == 1:
elem_prob = torch.tensor([True] * batch_shape[0])
elif p == 0:
elem_prob = torch.tensor([False] * batch_shape[0])
else:
elem_prob = _adapted_sampling((batch_shape[0],), self.
_p_gen, same_on_batch).bool()
batch_prob = batch_prob * elem_prob
else:
batch_prob = batch_prob.repeat(batch_shape[0])
return batch_prob
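    # Explanatory note (not from the original source): the batch-level probability acts as
    # a gate for the element-level one. With the default p_batch=1.0 the gate is always
    # open and each sample is kept with probability `p`; when the batch gate samples
    # False, `batch_prob` is repeated into an all-False mask and nothing is augmented.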
def forward_parameters(self, batch_shape):
to_apply = self.__batch_prob_generator__(batch_shape, self.p, self.
p_batch, self.same_on_batch)
_params = self.generate_parameters(torch.Size((int(to_apply.sum().
item()), *batch_shape[1:])))
if _params is None:
_params = {}
_params['batch_prob'] = to_apply
return _params
def apply_func(self, input: 'torch.Tensor', params:
'Dict[str, torch.Tensor]') ->Union[torch.Tensor, Tuple[torch.Tensor,
torch.Tensor]]:
input = self.transform_tensor(input)
return self.apply_transform(input, params)
def forward(self, input: 'torch.Tensor', params:
'Optional[Dict[str, torch.Tensor]]'=None) ->Union[torch.Tensor,
Tuple[torch.Tensor, torch.Tensor]]:
in_tensor = self.__unpack_input__(input)
self.__check_batching__(input)
ori_shape = in_tensor.shape
in_tensor = self.transform_tensor(in_tensor)
batch_shape = in_tensor.shape
if params is None:
params = self.forward_parameters(batch_shape)
self._params = params
output = self.apply_func(input, self._params)
return _transform_output_shape(output, ori_shape
) if self.keepdim else output
class _AugmentationBase(_BasicAugmentationBase):
"""_AugmentationBase base class for customized augmentation implementations.
Advanced augmentation base class with the functionality of transformation matrix calculations.
Args:
p (float): probability for applying an augmentation. This param controls the augmentation probabilities
element-wisely for a batch.
p_batch (float): probability for applying an augmentation to a batch. This param controls the augmentation
probabilities batch-wisely.
return_transform (bool): if ``True`` return the matrix describing the geometric transformation applied to each
input tensor. If ``False`` and the input is a tuple the applied transformation
            won't be concatenated.
same_on_batch (bool): apply the same transformation across the batch. Default: False.
keepdim (bool): whether to keep the output shape the same as input (True) or broadcast it
to the batch form (False). Default: False.
"""
def __init__(self, return_transform: 'bool'=None, same_on_batch: 'bool'
=False, p: 'float'=0.5, p_batch: 'float'=1.0, keepdim: 'bool'=False
) ->None:
super(_AugmentationBase, self).__init__(p, p_batch=p_batch,
same_on_batch=same_on_batch, keepdim=keepdim)
self.p = p
self.p_batch = p_batch
self.return_transform = return_transform
def __repr__(self) ->str:
return super().__repr__(
) + f', return_transform={self.return_transform}'
def identity_matrix(self, input: 'torch.Tensor') ->torch.Tensor:
raise NotImplementedError
def compute_transformation(self, input: 'torch.Tensor', params:
'Dict[str, torch.Tensor]') ->torch.Tensor:
raise NotImplementedError
def apply_transform(self, input: 'torch.Tensor', params:
'Dict[str, torch.Tensor]', transform: 'Optional[torch.Tensor]'=None
) ->torch.Tensor:
raise NotImplementedError
def __unpack_input__(self, input:
'Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]') ->Tuple[
torch.Tensor, Optional[torch.Tensor]]:
if isinstance(input, tuple):
in_tensor = input[0]
in_transformation = input[1]
return in_tensor, in_transformation
in_tensor = input
return in_tensor, None
def apply_func(self, in_tensor: 'torch.Tensor', in_transform:
'Optional[torch.Tensor]', params: 'Dict[str, torch.Tensor]',
return_transform: 'bool'=False) ->Union[torch.Tensor, Tuple[torch.
Tensor, torch.Tensor]]:
to_apply = params['batch_prob']
if torch.sum(to_apply) == 0:
output = in_tensor
trans_matrix = self.identity_matrix(in_tensor)
elif torch.sum(to_apply) == len(to_apply):
trans_matrix = self.compute_transformation(in_tensor, params)
output = self.apply_transform(in_tensor, params, trans_matrix)
else:
output = in_tensor.clone()
trans_matrix = self.identity_matrix(in_tensor)
trans_matrix[to_apply] = self.compute_transformation(in_tensor[
to_apply], params)
output[to_apply] = self.apply_transform(in_tensor[to_apply],
params, trans_matrix[to_apply])
self._transform_matrix = trans_matrix
if return_transform:
out_transformation = (trans_matrix if in_transform is None else
trans_matrix @ in_transform)
return output, out_transformation
if in_transform is not None:
return output, in_transform
return output
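    # Explanatory note (not from the original source): when only part of the batch is
    # selected by `batch_prob`, the transform is computed on that subset alone; the
    # remaining samples keep their original pixels and an identity matrix, so the stored
    # `_transform_matrix` is defined for every element of the batch.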
def forward(self, input:
'Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]', params:
'Optional[Dict[str, torch.Tensor]]'=None, return_transform:
'Optional[bool]'=None) ->Union[torch.Tensor, Tuple[torch.Tensor,
torch.Tensor]]:
in_tensor, in_transform = self.__unpack_input__(input)
self.__check_batching__(input)
ori_shape = in_tensor.shape
in_tensor = self.transform_tensor(in_tensor)
batch_shape = in_tensor.shape
if return_transform is None:
return_transform = self.return_transform
return_transform = cast(bool, return_transform)
if params is None:
params = self.forward_parameters(batch_shape)
if 'batch_prob' not in params:
params['batch_prob'] = torch.tensor([True] * batch_shape[0])
warnings.warn(
'`batch_prob` is not found in params. Will assume applying on all data.'
)
self._params = params
output = self.apply_func(in_tensor, in_transform, self._params,
return_transform)
return _transform_output_shape(output, ori_shape
) if self.keepdim else output
class AugmentationBase2D(_AugmentationBase):
"""AugmentationBase2D base class for customized augmentation implementations.
For any augmentation, the implementation of "generate_parameters" and "apply_transform" are required while the
"compute_transformation" is only required when passing "return_transform" as True.
Args:
p (float): probability for applying an augmentation. This param controls the augmentation probabilities
element-wisely for a batch.
p_batch (float): probability for applying an augmentation to a batch. This param controls the augmentation
probabilities batch-wisely.
return_transform (bool): if ``True`` return the matrix describing the geometric transformation applied to each
input tensor. If ``False`` and the input is a tuple the applied transformation
            won't be concatenated.
same_on_batch (bool): apply the same transformation across the batch. Default: False.
keepdim (bool): whether to keep the output shape the same as input (True) or broadcast it
to the batch form (False). Default: False.
"""
def __check_batching__(self, input:
'Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]'):
if isinstance(input, tuple):
inp, mat = input
if len(inp.shape) == 4:
assert len(mat.shape
) == 3, 'Input tensor is in batch mode but transformation matrix is not'
assert mat.shape[0] == inp.shape[0
                ], f'In batch dimension, input has {inp.shape[0]} but transformation matrix has {mat.shape[0]}'
elif len(inp.shape) == 3 or len(inp.shape) == 2:
assert len(mat.shape
) == 2, 'Input tensor is in non-batch mode but transformation matrix is not'
else:
raise ValueError(
f'Unrecognized output shape. Expected 2, 3, or 4, got {len(inp.shape)}'
)
def transform_tensor(self, input: 'torch.Tensor') ->torch.Tensor:
"""Convert any incoming (H, W), (C, H, W) and (B, C, H, W) into (B, C, H, W)."""
_validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.
float32, torch.float64])
return _transform_input(input)
def identity_matrix(self, input) ->torch.Tensor:
"""Return 3x3 identity matrix."""
return kornia.eye_like(3, input)
class IntensityAugmentationBase2D(AugmentationBase2D):
"""IntensityAugmentationBase2D base class for customized intensity augmentation implementations.
For any augmentation, the implementation of "generate_parameters" and "apply_transform" are required while the
"compute_transformation" is only required when passing "return_transform" as True.
Args:
p (float): probability for applying an augmentation. This param controls the augmentation probabilities
element-wisely for a batch.
p_batch (float): probability for applying an augmentation to a batch. This param controls the augmentation
probabilities batch-wisely.
return_transform (bool): if ``True`` return the matrix describing the geometric transformation applied to each
input tensor. If ``False`` and the input is a tuple the applied transformation
            won't be concatenated.
same_on_batch (bool): apply the same transformation across the batch. Default: False.
keepdim (bool): whether to keep the output shape the same as input (True) or broadcast it
to the batch form (False). Default: False.
"""
def compute_transformation(self, input: 'torch.Tensor', params:
'Dict[str, torch.Tensor]') ->torch.Tensor:
return self.identity_matrix(input)
class ParamItem(NamedTuple):
name: 'str'
data: 'Union[dict, list]'
class ImageSequential(nn.Sequential):
"""Sequential for creating kornia image processing pipeline.
Args:
*args : a list of kornia augmentation and image operation modules.
same_on_batch: apply the same transformation across the batch.
If None, it will not overwrite the function-wise settings.
return_transform: if ``True`` return the matrix describing the transformation
applied to each. If None, it will not overwrite the function-wise settings.
keepdim: whether to keep the output shape the same as input (True) or broadcast it
to the batch form (False). If None, it will not overwrite the function-wise settings.
random_apply: randomly select a sublist (order agnostic) of args to
apply transformation.
If int, a fixed number of transformations will be selected.
If (a,), x number of transformations (a <= x <= len(args)) will be selected.
If (a, b), x number of transformations (a <= x <= b) will be selected.
If True, the whole list of args will be processed as a sequence in a random order.
If False, the whole list of args will be processed as a sequence in original order.
Returns:
        Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: the tensor (and, optionally, the transformation
        matrix) that has been sequentially modified by the args.
Examples:
>>> import kornia
>>> input = torch.randn(2, 3, 5, 6)
>>> aug_list = ImageSequential(
... kornia.color.BgrToRgb(),
... kornia.augmentation.ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.0),
... kornia.filters.MedianBlur((3, 3)),
... kornia.augmentation.RandomAffine(360, p=1.0),
... kornia.enhance.Invert(),
... return_transform=True,
... same_on_batch=True,
... random_apply=10,
... )
>>> out = aug_list(input)
>>> out[0].shape, out[1].shape
(torch.Size([2, 3, 5, 6]), torch.Size([2, 3, 3]))
Reproduce with provided params.
>>> out2 = aug_list(input, params=aug_list._params)
>>> torch.equal(out[0], out2[0]), torch.equal(out[1], out2[1])
(True, True)
Note:
Transformation matrix returned only considers the transformation applied in ``kornia.augmentation`` module.
Those transformations in ``kornia.geometry`` will not be taken into account.
"""
def __init__(self, *args: nn.Module, same_on_batch: Optional[bool]=None,
return_transform: Optional[bool]=None, keepdim: Optional[bool]=None,
random_apply: Union[int, bool, Tuple[int, int]]=False) ->None:
self.same_on_batch = same_on_batch
self.return_transform = return_transform
self.keepdim = keepdim
_args = OrderedDict()
for idx, arg in enumerate(args):
if not isinstance(arg, nn.Module):
raise NotImplementedError(
f'Only nn.Module are supported at this moment. Got {arg}.')
if isinstance(arg, _AugmentationBase):
if same_on_batch is not None:
arg.same_on_batch = same_on_batch
if return_transform is not None:
arg.return_transform = return_transform
if keepdim is not None:
arg.keepdim = keepdim
_args.update({f'{arg.__class__.__name__}_{idx}': arg})
super(ImageSequential, self).__init__(_args)
self._params: 'List[Any]' = []
self.random_apply: 'Union[Tuple[int, int], bool]'
if random_apply:
if isinstance(random_apply, (bool,)) and random_apply is True:
self.random_apply = len(args), len(args) + 1
elif isinstance(random_apply, (int,)):
self.random_apply = random_apply, random_apply + 1
elif isinstance(random_apply, (tuple,)) and len(random_apply
) == 2 and isinstance(random_apply[0], (int,)) and isinstance(
random_apply[1], (int,)):
self.random_apply = random_apply[0], random_apply[1] + 1
elif isinstance(random_apply, (tuple,)) and len(random_apply
) == 1 and isinstance(random_apply[0], (int,)):
self.random_apply = random_apply[0], len(args) + 1
else:
raise ValueError(
f'Non-readable random_apply. Got {random_apply}.')
assert isinstance(self.random_apply, (tuple,)) and len(self.
random_apply) == 2 and isinstance(self.random_apply[0], (int,)
            ) and isinstance(self.random_apply[1], (int,)
), f'Expect a tuple of (int, int). Got {self.random_apply}.'
else:
self.random_apply = False
def _get_child_sequence(self) ->Iterator[Tuple[str, nn.Module]]:
if self.random_apply:
num_samples = int(torch.randint(*self.random_apply, (1,)).item())
indices = torch.multinomial(torch.ones((len(self),)),
num_samples, replacement=True if num_samples > len(self) else
False)
return self._get_children_by_indices(indices)
return self.named_children()
def _get_children_by_indices(self, indices: 'torch.Tensor') ->Iterator[
Tuple[str, nn.Module]]:
modules = list(self.named_children())
for idx in indices:
yield modules[idx]
def _get_children_by_module_names(self, names: 'List[str]') ->Iterator[
Tuple[str, nn.Module]]:
modules = list(self.named_children())
for name in names:
yield modules[list(dict(self.named_children()).keys()).index(name)]
def get_forward_sequence(self, params: 'Optional[List[ParamItem]]'=None
) ->Iterator[Tuple[str, nn.Module]]:
if params is None:
named_modules = self._get_child_sequence()
else:
named_modules = self._get_children_by_module_names([p.name for
p in params])
return named_modules
def apply_to_input(self, input:
'Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]',
module_name: 'str', module: 'Optional[nn.Module]'=None, param:
'Optional[ParamItem]'=None) ->Union[torch.Tensor, Tuple[torch.
Tensor, torch.Tensor]]:
if module is None:
module = self.get_submodule(module_name)
if param is not None:
assert module_name == param.name
_param = param.data
else:
_param = None
if isinstance(module, (_AugmentationBase, ImageSequential)
) and _param is None:
input = module(input)
self._params.append(ParamItem(module_name, module._params))
elif isinstance(module, (_AugmentationBase, ImageSequential)
) and _param is not None:
input = module(input, params=_param)
self._params.append(ParamItem(module_name, _param))
else:
assert _param == {
                } or _param is None, f'Non-augmentation operation {module_name} requires empty parameters. Got {module}.'
if isinstance(input, (tuple, list)):
input = module(input[0]), input[1]
else:
input = module(input)
self._params.append(ParamItem(module_name, {}))
return input
def forward(self, input:
'Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]', params:
'Optional[List[ParamItem]]'=None) ->Union[torch.Tensor, Tuple[torch
.Tensor, torch.Tensor]]:
self._params = []
named_modules = self.get_forward_sequence(params)
params = [] if params is None else params
for (name, module), param in zip_longest(named_modules, params):
input = self.apply_to_input(input, name, module, param=param)
return input
class ColorJitter(IntensityAugmentationBase2D):
"""Applies a random transformation to the brightness, contrast, saturation and hue of a tensor image.
.. image:: _static/img/ColorJitter.png
Args:
p: probability of applying the transformation.
brightness: The brightness factor to apply.
contrast: The contrast factor to apply.
saturation: The saturation factor to apply.
hue: The hue factor to apply.
return_transform: if ``True`` return the matrix describing the transformation applied to each
input tensor. If ``False`` and the input is a tuple the applied transformation
            won't be concatenated.
same_on_batch: apply the same transformation across the batch.
keepdim: whether to keep the output shape the same as input (True) or broadcast it
to the batch form (False).
Shape:
- Input: :math:`(C, H, W)` or :math:`(B, C, H, W)`, Optional: :math:`(B, 3, 3)`
- Output: :math:`(B, C, H, W)`
Note:
Input tensor must be float and normalized into [0, 1] for the best differentiability support.
Additionally, this function accepts another transformation tensor (:math:`(B, 3, 3)`), then the
        applied transformation will be merged into the input transformation tensor and returned.
Examples:
>>> rng = torch.manual_seed(0)
>>> inputs = torch.ones(1, 3, 3, 3)
>>> aug = ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.)
>>> aug(inputs)
tensor([[[[0.9993, 0.9993, 0.9993],
[0.9993, 0.9993, 0.9993],
[0.9993, 0.9993, 0.9993]],
<BLANKLINE>
[[0.9993, 0.9993, 0.9993],
[0.9993, 0.9993, 0.9993],
[0.9993, 0.9993, 0.9993]],
<BLANKLINE>
[[0.9993, 0.9993, 0.9993],
[0.9993, 0.9993, 0.9993],
[0.9993, 0.9993, 0.9993]]]])
"""
def __init__(self, brightness:
'Union[torch.Tensor, float, Tuple[float, float], List[float]]'=0.0,
contrast:
'Union[torch.Tensor, float, Tuple[float, float], List[float]]'=0.0,
saturation:
'Union[torch.Tensor, float, Tuple[float, float], List[float]]'=0.0,
hue: 'Union[torch.Tensor, float, Tuple[float, float], List[float]]'
=0.0, return_transform: 'bool'=False, same_on_batch: 'bool'=False,
p: 'float'=1.0, keepdim: 'bool'=False) ->None:
super(ColorJitter, self).__init__(p=p, return_transform=
return_transform, same_on_batch=same_on_batch, keepdim=keepdim)
self._device, self._dtype = _extract_device_dtype([brightness,
contrast, hue, saturation])
self.brightness = brightness
self.contrast = contrast
self.saturation = saturation
self.hue = hue
def __repr__(self) ->str:
repr = (
f'brightness={self.brightness}, contrast={self.contrast}, saturation={self.saturation}, hue={self.hue}'
)
return self.__class__.__name__ + f'({repr}, {super().__repr__()})'
def generate_parameters(self, batch_shape: 'torch.Size') ->Dict[str,
torch.Tensor]:
brightness: 'torch.Tensor' = _range_bound(self.brightness,
'brightness', center=1.0, bounds=(0, 2), device=self._device,
dtype=self._dtype)
contrast: 'torch.Tensor' = _range_bound(self.contrast, 'contrast',
center=1.0, device=self._device, dtype=self._dtype)
saturation: 'torch.Tensor' = _range_bound(self.saturation,
'saturation', center=1.0, device=self._device, dtype=self._dtype)
hue: 'torch.Tensor' = _range_bound(self.hue, 'hue', bounds=(-0.5,
0.5), device=self._device, dtype=self._dtype)
return rg.random_color_jitter_generator(batch_shape[0], brightness,
contrast, saturation, hue, self.same_on_batch, self.device,
self.dtype)
def apply_transform(self, input: 'torch.Tensor', params:
'Dict[str, torch.Tensor]', transform: 'Optional[torch.Tensor]'=None
) ->torch.Tensor:
transforms = [lambda img: adjust_brightness(img, params[
'brightness_factor'] - 1), lambda img: adjust_contrast(img,
params['contrast_factor']), lambda img: adjust_saturation(img,
params['saturation_factor']), lambda img: adjust_hue(img,
params['hue_factor'] * 2 * pi)]
jittered = input
for idx in params['order'].tolist():
t = transforms[idx]
jittered = t(jittered)
return jittered
class PatchSequential(ImageSequential):
"""Container for performing patch-level image processing.
.. image:: https://kornia-tutorials.readthedocs.io/en/latest/_images/data_patch_sequential_5_1.png
    PatchSequential breaks input images into patches by a given grid size, which will be assembled back
afterwards. Different image processing and augmentation methods will be performed on each patch region.
Args:
*args: a list of processing modules.
        grid_size: controls the grid board separation.
        padding: same or valid padding. If same padding, it will pad to include all pixels if the input
            tensor is not divisible by grid_size. If valid padding, the redundant border will be removed.
same_on_batch: apply the same transformation across the batch.
If None, it will not overwrite the function-wise settings.
keepdim: whether to keep the output shape the same as input (True) or broadcast it
to the batch form (False). If None, it will not overwrite the function-wise settings.
        patchwise_apply: whether the image processing args will be applied patch-wisely.
if ``True``, the number of args must be equal to grid number.
if ``False``, the image processing args will be applied as a sequence to all patches. Default: False.
random_apply: randomly select a sublist (order agnostic) of args to
apply transformation.
If ``int`` (batchwise mode only), a fixed number of transformations will be selected.
If ``(a,)`` (batchwise mode only), x number of transformations (a <= x <= len(args)) will be selected.
If ``(a, b)`` (batchwise mode only), x number of transformations (a <= x <= b) will be selected.
If ``True``, the whole list of args will be processed in a random order.
If ``False``, the whole list of args will be processed in original order.
Return:
        List[Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]]: the tensor (and, optionally, the
        transformation matrix) that has been sequentially modified by the args.
Examples:
>>> import kornia.augmentation as K
>>> input = torch.randn(2, 3, 224, 224)
>>> seq = PatchSequential(
... ImageSequential(
... K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=0.5),
... K.RandomPerspective(0.2, p=0.5),
... K.RandomSolarize(0.1, 0.1, p=0.5),
... ),
... K.RandomAffine(360, p=1.0),
... ImageSequential(
... K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=0.5),
... K.RandomPerspective(0.2, p=0.5),
... K.RandomSolarize(0.1, 0.1, p=0.5),
... ),
... K.RandomSolarize(0.1, 0.1, p=0.1),
... grid_size=(2,2),
... patchwise_apply=False,
... same_on_batch=True,
... random_apply=True,
... )
>>> out = seq(input)
>>> out.shape
torch.Size([2, 3, 224, 224])
>>> out1 = seq(input, seq._params)
>>> torch.equal(out, out1)
True
"""
def __init__(self, *args: nn.Module, grid_size: Tuple[int, int]=(4, 4),
padding: str='same', same_on_batch: Optional[bool]=None, keepdim:
Optional[bool]=None, patchwise_apply: bool=False, random_apply:
Union[int, bool, Tuple[int, int]]=False) ->None:
_random_apply: 'Optional[Union[int, Tuple[int, int]]]'
if patchwise_apply and random_apply is True:
_random_apply = grid_size[0] * grid_size[1], grid_size[0
] * grid_size[1]
elif patchwise_apply and random_apply is False:
assert len(args) == grid_size[0] * grid_size[1
                ], f'The number of processing modules must be equal to the grid size. Got {len(args)} and {grid_size[0] * grid_size[1]}.'
_random_apply = random_apply
elif patchwise_apply and isinstance(random_apply, (int, tuple)):
raise ValueError(
f'Only boolean value allowed when `patchwise_apply` is set to True. Got {random_apply}.'
)
else:
_random_apply = random_apply
super(PatchSequential, self).__init__(*args, same_on_batch=
same_on_batch, return_transform=False, keepdim=keepdim,
random_apply=_random_apply)
assert padding in ['same', 'valid'
], f'`padding` must be either `same` or `valid`. Got {padding}.'
self.grid_size = grid_size
self.padding = padding
self.patchwise_apply = patchwise_apply
def is_intensity_only(self) ->bool:
"""Check if all transformations are intensity-based.
        Note: patch processing would break the continuity of labels (e.g. bounding boxes, masks).
"""
for arg in self.children():
if isinstance(arg, (ImageSequential,)):
for _arg in arg.children():
if not isinstance(_arg, IntensityAugmentationBase2D):
return False
elif not isinstance(_arg, IntensityAugmentationBase2D):
return False
return True
def __repeat_param_across_patches__(self, param: 'torch.Tensor',
patch_num: 'int') ->torch.Tensor:
"""Repeat parameters across patches.
        The input is shaped as (B, ...) and the output as (B * patch_num, ...), which
        guarantees that the same transformation is applied at each patch index.
(B1, B2, ..., Bn) => (B1, ... Bn, B1, ..., Bn, ..., B1, ..., Bn)
| pt_size | | pt_size | ..., | pt_size |
"""
repeated = torch.cat([param] * patch_num, dim=0)
return repeated
def compute_padding(self, input: 'torch.Tensor', padding: 'str',
grid_size: 'Optional[Tuple[int, int]]'=None) ->Tuple[int, int, int, int
]:
if grid_size is None:
grid_size = self.grid_size
if padding == 'valid':
ph, pw = input.size(-2) // grid_size[0], input.size(-1
) // grid_size[1]
return -pw // 2, pw // 2 - pw, -ph // 2, ph // 2 - ph
elif padding == 'same':
ph = input.size(-2) - input.size(-2) // grid_size[0] * grid_size[0]
pw = input.size(-1) - input.size(-1) // grid_size[1] * grid_size[1]
return pw // 2, pw - pw // 2, ph // 2, ph - ph // 2
else:
raise NotImplementedError(
f"Expect `padding` as either 'valid' or 'same'. Got {padding}."
)
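    # Explanatory note (not from the original source): with 'same' padding the image is
    # grown until height and width are divisible by the grid, e.g. a 5x5 input with a
    # 2x2 grid is padded to 6x6 before patch extraction. The returned 4-tuple follows the
    # (left, right, top, bottom) convention expected by torch.nn.functional.pad.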
def extract_patches(self, input: 'torch.Tensor', grid_size:
'Optional[Tuple[int, int]]'=None, pad:
'Optional[Tuple[int, int, int, int]]'=None) ->torch.Tensor:
"""Extract patches from tensor.
Example:
>>> import kornia.augmentation as K
>>> pas = PatchSequential(K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.0))
>>> pas.extract_patches(torch.arange(16).view(1, 1, 4, 4), grid_size=(2, 2))
tensor([[[[[ 0, 1],
[ 4, 5]]],
<BLANKLINE>
<BLANKLINE>
[[[ 2, 3],
[ 6, 7]]],
<BLANKLINE>
<BLANKLINE>
[[[ 8, 9],
[12, 13]]],
<BLANKLINE>
<BLANKLINE>
[[[10, 11],
[14, 15]]]]])
>>> pas.extract_patches(torch.arange(54).view(1, 1, 6, 9), grid_size=(2, 2), pad=(-1, -1, -2, -2))
tensor([[[[[19, 20, 21]]],
<BLANKLINE>
<BLANKLINE>
[[[22, 23, 24]]],
<BLANKLINE>
<BLANKLINE>
[[[28, 29, 30]]],
<BLANKLINE>
<BLANKLINE>
[[[31, 32, 33]]]]])
"""
if pad is not None:
input = torch.nn.functional.pad(input, list(pad))
if grid_size is None:
grid_size = self.grid_size
window_size = input.size(-2) // grid_size[-2], input.size(-1
) // grid_size[-1]
stride = window_size
return extract_tensor_patches(input, window_size, stride)
def restore_from_patches(self, patches: 'torch.Tensor', grid_size:
'Tuple[int, int]'=(4, 4), pad:
'Optional[Tuple[int, int, int, int]]'=None) ->torch.Tensor:
"""Restore input from patches.
Example:
>>> import kornia.augmentation as K
>>> pas = PatchSequential(K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.0))
>>> out = pas.extract_patches(torch.arange(16).view(1, 1, 4, 4), grid_size=(2, 2))
>>> pas.restore_from_patches(out, grid_size=(2, 2))
tensor([[[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]]]])
"""
if grid_size is None:
grid_size = self.grid_size
patches_tensor = patches.view(-1, grid_size[0], grid_size[1], *
patches.shape[-3:])
restored_tensor = torch.cat(torch.chunk(patches_tensor, grid_size[0
], dim=1), -2).squeeze(1)
restored_tensor = torch.cat(torch.chunk(restored_tensor, grid_size[
1], dim=1), -1).squeeze(1)
if pad is not None:
restored_tensor = torch.nn.functional.pad(restored_tensor, [(-i
) for i in pad])
return restored_tensor
def forward_patchwise(self, input: 'torch.Tensor', params:
'Optional[List[List[ParamItem]]]'=None) ->torch.Tensor:
if params is None:
params = [[]] * input.size(1)
auglist = [self.get_forward_sequence() for _ in range(input.
size(1))]
else:
auglist = [self.get_forward_sequence(p) for p in params]
assert input.size(0) == len(auglist) == len(params)
out = []
self._params = []
for inp, proc, param in zip(input, auglist, params):
o = []
p = []
for inp_pat, (proc_name, proc_pat), _param in zip_longest(inp,
proc, param):
if isinstance(proc_pat, (_AugmentationBase, ImageSequential)):
o.append(proc_pat(inp_pat[None], _param.data if _param
is not None else None))
p.append(ParamItem(proc_name, proc_pat._params))
else:
o.append(proc_pat(inp_pat[None]))
p.append(ParamItem(proc_name, {}))
out.append(torch.cat(o, dim=0))
self._params.append(p)
input = torch.stack(out, dim=0)
return input
def forward_batchwise(self, input: 'torch.Tensor', params:
'Optional[List[ParamItem]]'=None) ->torch.Tensor:
if self.same_on_batch:
batch_shape = input.size(1), *input.shape[-3:]
patch_num = input.size(0)
else:
batch_shape = input.size(0) * input.size(1), *input.shape[-3:]
if params is None:
params = []
for name, aug in self.get_forward_sequence():
if isinstance(aug, _AugmentationBase):
aug.same_on_batch = False
param = aug.forward_parameters(batch_shape)
if self.same_on_batch:
for k, v in param.items():
if not (k == 'order' and isinstance(aug,
ColorJitter)):
param.update({k: self.
__repeat_param_across_patches__(v,
patch_num)})
aug.same_on_batch = True
else:
param = None
params.append(ParamItem(name, param))
input = super().forward(input.view(-1, *input.shape[-3:]), params)
return input
def forward(self, input:
'Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]', params:
'Optional[Union[List[ParamItem], List[List[ParamItem]]]]'=None
) ->Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
"""Input transformation will be returned if input is a tuple."""
if isinstance(input, (tuple,)):
pad = self.compute_padding(input[0], self.padding)
input = self.extract_patches(input[0], self.grid_size, pad), input[
1]
else:
pad = self.compute_padding(input, self.padding)
input = self.extract_patches(input, self.grid_size, pad)
if not self.patchwise_apply:
params = cast(List[ParamItem], params)
if isinstance(input, (tuple,)):
input = self.forward_batchwise(input[0], params), input[1]
else:
input = self.forward_batchwise(input, params)
else:
params = cast(List[List[ParamItem]], params)
if isinstance(input, (tuple,)):
input = self.forward_patchwise(input[0], params), input[1]
else:
input = self.forward_patchwise(input, params)
if isinstance(input, (tuple,)):
input = self.restore_from_patches(input[0], self.grid_size, pad=pad
), input[1]
else:
input = self.restore_from_patches(input, self.grid_size, pad=pad)
return input
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import warnings
from typing import Dict
from typing import Optional
from typing import Tuple
import torch.nn as nn
import torch.nn.functional as F
from typing import cast
from typing import List
from typing import Union
from torch.distributions import Bernoulli
from itertools import zip_longest
from collections import OrderedDict
from typing import Any
from typing import Iterator
from typing import NamedTuple
from torch.nn.modules.utils import _pair
from math import pi
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x5 = xindex // 16
x6 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = x1
tmp7 = tmp5 < tmp3
tmp8 = tmp7 & tmp4
tmp9 = tl.load(in_ptr0 + (16 * x2 + 64 * x3 + 16 * x3 % 16), tmp8 &
xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tmp5 >= tmp3
tmp11 = tl.full([1], 2, tl.int64)
tmp12 = tmp5 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tmp13 & tmp4
tmp15 = tl.load(in_ptr0 + (4 + 16 * x5), tmp14 & xmask, eviction_policy
='evict_last', other=0.0)
tmp16 = tmp5 >= tmp11
tmp17 = tl.full([1], 3, tl.int64)
tmp18 = tmp5 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tmp19 & tmp4
tmp21 = tl.load(in_ptr0 + (8 + 16 * x5), tmp20 & xmask, eviction_policy
='evict_last', other=0.0)
tmp22 = tmp5 >= tmp17
tl.full([1], 4, tl.int64)
tmp25 = tmp22 & tmp4
tmp26 = tl.load(in_ptr0 + (12 + 16 * x5), tmp25 & xmask,
eviction_policy='evict_last', other=0.0)
tmp27 = tl.where(tmp19, tmp21, tmp26)
tmp28 = tl.where(tmp13, tmp15, tmp27)
tmp29 = tl.where(tmp7, tmp9, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp4, tmp29, tmp30)
tmp32 = tmp0 >= tmp3
tmp33 = tmp0 < tmp11
tmp34 = tmp32 & tmp33
tmp35 = tmp7 & tmp34
tmp36 = tl.load(in_ptr0 + (1 + 16 * x5), tmp35 & xmask, eviction_policy
='evict_last', other=0.0)
tmp37 = tmp13 & tmp34
tmp38 = tl.load(in_ptr0 + (5 + 16 * x5), tmp37 & xmask, eviction_policy
='evict_last', other=0.0)
tmp39 = tmp19 & tmp34
tmp40 = tl.load(in_ptr0 + (9 + 16 * x5), tmp39 & xmask, eviction_policy
='evict_last', other=0.0)
tmp41 = tmp22 & tmp34
tmp42 = tl.load(in_ptr0 + (13 + 16 * x5), tmp41 & xmask,
eviction_policy='evict_last', other=0.0)
tmp43 = tl.where(tmp19, tmp40, tmp42)
tmp44 = tl.where(tmp13, tmp38, tmp43)
tmp45 = tl.where(tmp7, tmp36, tmp44)
tmp46 = tl.full(tmp45.shape, 0.0, tmp45.dtype)
tmp47 = tl.where(tmp34, tmp45, tmp46)
tmp48 = tmp0 >= tmp11
tmp49 = tmp0 < tmp17
tmp50 = tmp48 & tmp49
tmp51 = tmp7 & tmp50
tmp52 = tl.load(in_ptr0 + (2 + 16 * x5), tmp51 & xmask, eviction_policy
='evict_last', other=0.0)
tmp53 = tmp13 & tmp50
tmp54 = tl.load(in_ptr0 + (6 + 16 * x5), tmp53 & xmask, eviction_policy
='evict_last', other=0.0)
tmp55 = tmp19 & tmp50
tmp56 = tl.load(in_ptr0 + (10 + 16 * x5), tmp55 & xmask,
eviction_policy='evict_last', other=0.0)
tmp57 = tmp22 & tmp50
tmp58 = tl.load(in_ptr0 + (14 + 16 * x5), tmp57 & xmask,
eviction_policy='evict_last', other=0.0)
tmp59 = tl.where(tmp19, tmp56, tmp58)
tmp60 = tl.where(tmp13, tmp54, tmp59)
tmp61 = tl.where(tmp7, tmp52, tmp60)
tmp62 = tl.full(tmp61.shape, 0.0, tmp61.dtype)
tmp63 = tl.where(tmp50, tmp61, tmp62)
tmp64 = tmp0 >= tmp17
tmp66 = tmp7 & tmp64
tmp67 = tl.load(in_ptr0 + (3 + 16 * x5), tmp66 & xmask, eviction_policy
='evict_last', other=0.0)
tmp68 = tmp13 & tmp64
tmp69 = tl.load(in_ptr0 + (7 + 16 * x5), tmp68 & xmask, eviction_policy
='evict_last', other=0.0)
tmp70 = tmp19 & tmp64
tmp71 = tl.load(in_ptr0 + (11 + 16 * x5), tmp70 & xmask,
eviction_policy='evict_last', other=0.0)
tmp72 = tmp22 & tmp64
tmp73 = tl.load(in_ptr0 + (15 + 16 * x5), tmp72 & xmask,
eviction_policy='evict_last', other=0.0)
tmp74 = tl.where(tmp19, tmp71, tmp73)
tmp75 = tl.where(tmp13, tmp69, tmp74)
tmp76 = tl.where(tmp7, tmp67, tmp75)
tmp77 = tl.full(tmp76.shape, 0.0, tmp76.dtype)
tmp78 = tl.where(tmp64, tmp76, tmp77)
tmp79 = tl.where(tmp50, tmp63, tmp78)
tmp80 = tl.where(tmp34, tmp47, tmp79)
tmp81 = tl.where(tmp4, tmp31, tmp80)
tl.store(out_ptr0 + x6, tmp81, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4, 4), (64, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0),
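# Illustrative note (assumption, not part of the generated code): `call` consumes its
# argument list in place and returns a one-element tuple holding a CUDA tensor, so a
# hypothetical invocation would look like:
#
#     x = torch.rand(4, 4, 4, 4, device='cuda')
#     out, = call([x])   # the list is cleared inside `call`
#     assert out.shape == (4, 4, 4, 4)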
def _adapted_sampling(shape: 'Union[Tuple, torch.Size]', dist:
'torch.distributions.Distribution', same_on_batch=False) ->torch.Tensor:
"""The uniform sampling function that accepts 'same_on_batch'.
    If same_on_batch is True, all values generated will be exactly the same given a batch_size (shape[0]).
By default, same_on_batch is set to False.
"""
if same_on_batch:
return dist.sample((1, *shape[1:])).repeat(shape[0], *([1] * (len(
shape) - 1)))
return dist.sample(shape)
def _transform_output_shape(output:
'Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]', shape: 'Tuple'
) ->Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
"""Collapse the broadcasted batch dimensions an input tensor to be the specified shape.
Args:
input: torch.Tensor
shape: List/tuple of int
Returns:
torch.Tensor
"""
is_tuple = isinstance(output, tuple)
out_tensor: 'torch.Tensor'
trans_matrix: 'Optional[torch.Tensor]'
if is_tuple:
out_tensor, trans_matrix = cast(Tuple[torch.Tensor, torch.Tensor],
output)
else:
out_tensor = cast(torch.Tensor, output)
trans_matrix = None
if trans_matrix is not None:
if len(out_tensor.shape) > len(shape):
assert trans_matrix.shape[0
] == 1, f'Dimension 0 of transformation matrix is expected to be 1, got {trans_matrix.shape[0]}'
trans_matrix = trans_matrix.squeeze(0)
for dim in range(len(out_tensor.shape) - len(shape)):
assert out_tensor.shape[0
] == 1, f'Dimension {dim} of input is expected to be 1, got {out_tensor.shape[0]}'
out_tensor = out_tensor.squeeze(0)
return (out_tensor, trans_matrix) if is_tuple else out_tensor
def _transform_input(input: 'torch.Tensor') ->torch.Tensor:
"""Reshape an input tensor to be (*, C, H, W). Accept either (H, W), (C, H, W) or (*, C, H, W).
Args:
input: torch.Tensor
Returns:
torch.Tensor
"""
if not torch.is_tensor(input):
raise TypeError(f'Input type is not a torch.Tensor. Got {type(input)}')
if len(input.shape) not in [2, 3, 4]:
raise ValueError(
f'Input size must have a shape of either (H, W), (C, H, W) or (*, C, H, W). Got {input.shape}'
)
if len(input.shape) == 2:
input = input.unsqueeze(0)
if len(input.shape) == 3:
input = input.unsqueeze(0)
return input
def _validate_input_dtype(input: 'torch.Tensor', accepted_dtypes: 'List'
) ->None:
"""Check if the dtype of the input tensor is in the range of accepted_dtypes
Args:
input: torch.Tensor
accepted_dtypes: List. e.g. [torch.float32, torch.float64]
"""
if input.dtype not in accepted_dtypes:
raise TypeError(
f'Expected input of {accepted_dtypes}. Got {input.dtype}')
def _extract_device_dtype(tensor_list: 'List[Optional[Any]]') ->Tuple[torch
.device, torch.dtype]:
"""Check if all the input are in the same device (only if when they are torch.Tensor).
If so, it would return a tuple of (device, dtype). Default: (cpu, ``get_default_dtype()``).
Returns:
[torch.device, torch.dtype]
"""
device, dtype = None, None
for tensor in tensor_list:
if tensor is not None:
if not isinstance(tensor, (torch.Tensor,)):
continue
_device = tensor.device
_dtype = tensor.dtype
if device is None and dtype is None:
device = _device
dtype = _dtype
elif device != _device or dtype != _dtype:
raise ValueError(
                f'Passed values are not in the same device and dtype. Got ({device}, {dtype}) and ({_device}, {_dtype}).'
)
if device is None:
device = torch.device('cpu')
if dtype is None:
dtype = torch.get_default_dtype()
return device, dtype
def _joint_range_check(ranged_factor: 'torch.Tensor', name: 'str', bounds:
'Optional[Tuple[float, float]]'=None) ->None:
"""check if bounds[0] <= ranged_factor[0] <= ranged_factor[1] <= bounds[1]"""
if bounds is None:
bounds = float('-inf'), float('inf')
if ranged_factor.dim() == 1 and len(ranged_factor) == 2:
if not bounds[0] <= ranged_factor[0] or not bounds[1] >= ranged_factor[
1]:
raise ValueError(
f'{name} out of bounds. Expected inside {bounds}, got {ranged_factor}.'
)
if not bounds[0] <= ranged_factor[0] <= ranged_factor[1] <= bounds[1]:
raise ValueError(
            f'{name}[0] should be smaller than {name}[1]. Got {ranged_factor}.'
)
else:
raise TypeError(
            f'{name} should be a tensor with length 2 whose values are between {bounds}. Got {ranged_factor}.'
)
def _singular_range_check(ranged_factor: 'torch.Tensor', name: 'str',
bounds: 'Optional[Tuple[float, float]]'=None, skip_none: 'bool'=False,
mode: 'str'='2d') ->None:
"""check if bounds[0] <= ranged_factor[0] <= bounds[1] and bounds[0] <= ranged_factor[1] <= bounds[1]"""
if mode == '2d':
dim_size = 2
elif mode == '3d':
dim_size = 3
else:
raise ValueError(f"'mode' shall be either 2d or 3d. Got {mode}")
if skip_none and ranged_factor is None:
return
if bounds is None:
bounds = float('-inf'), float('inf')
if ranged_factor.dim() == 1 and len(ranged_factor) == dim_size:
for f in ranged_factor:
if not bounds[0] <= f <= bounds[1]:
raise ValueError(
f'{name} out of bounds. Expected inside {bounds}, got {ranged_factor}.'
)
else:
raise TypeError(
            f'{name} should be a float number or a tuple with length {dim_size} whose values are between {bounds}. Got {ranged_factor}.'
)
def _range_bound(factor:
'Union[torch.Tensor, float, Tuple[float, float], List[float]]', name:
'str', center: 'float'=0.0, bounds: 'Tuple[float, float]'=(0, float(
'inf')), check: 'Optional[str]'='joint', device: 'torch.device'=torch.
device('cpu'), dtype: 'torch.dtype'=torch.get_default_dtype()
) ->torch.Tensor:
"""Check inputs and compute the corresponding factor bounds"""
if not isinstance(factor, torch.Tensor):
factor = torch.tensor(factor, device=device, dtype=dtype)
factor_bound: 'torch.Tensor'
if factor.dim() == 0:
if factor < 0:
raise ValueError(
            f'If {name} is a single number, it must be non-negative. Got {factor}.'
)
factor_bound = factor.repeat(2) * torch.tensor([-1.0, 1.0], device=
factor.device, dtype=factor.dtype) + center
factor_bound = factor_bound.clamp(bounds[0], bounds[1])
else:
factor_bound = torch.as_tensor(factor, device=device, dtype=dtype)
if check is not None:
if check == 'joint':
_joint_range_check(factor_bound, name, bounds)
elif check == 'singular':
_singular_range_check(factor_bound, name, bounds)
else:
raise NotImplementedError(f"methods '{check}' not implemented.")
return factor_bound
def adjust_brightness(input: 'torch.Tensor', brightness_factor:
'Union[float, torch.Tensor]') ->torch.Tensor:
"""Adjust Brightness of an image.
.. image:: _static/img/adjust_brightness.png
    This implementation aligns with OpenCV, not PIL. Hence, the output differs from TorchVision.
The input image is expected to be in the range of [0, 1].
Args:
input: image to be adjusted in the shape of :math:`(*, N)`.
        brightness_factor: Brightness adjust factor per element
            in the batch. 0 does not modify the input image, while any other number modifies
            the brightness.
Return:
Adjusted image in the shape of :math:`(*, N)`.
Example:
>>> x = torch.ones(1, 1, 2, 2)
>>> adjust_brightness(x, 1.)
tensor([[[[1., 1.],
[1., 1.]]]])
>>> x = torch.ones(2, 5, 3, 3)
>>> y = torch.tensor([0.25, 0.50])
>>> adjust_brightness(x, y).shape
torch.Size([2, 5, 3, 3])
"""
if not isinstance(input, torch.Tensor):
raise TypeError(f'Input type is not a torch.Tensor. Got {type(input)}')
if not isinstance(brightness_factor, (float, torch.Tensor)):
raise TypeError(
f'The factor should be either a float or torch.Tensor. Got {type(brightness_factor)}'
)
if isinstance(brightness_factor, float):
brightness_factor = torch.tensor([brightness_factor])
brightness_factor = brightness_factor.to(input.device)
for _ in input.shape[1:]:
brightness_factor = torch.unsqueeze(brightness_factor, dim=-1)
x_adjust: 'torch.Tensor' = input + brightness_factor
out: 'torch.Tensor' = torch.clamp(x_adjust, 0.0, 1.0)
return out
def adjust_contrast(input: 'torch.Tensor', contrast_factor:
'Union[float, torch.Tensor]') ->torch.Tensor:
"""Adjust Contrast of an image.
.. image:: _static/img/adjust_contrast.png
This implementation aligns with OpenCV, not PIL. Hence, the output differs from TorchVision.
The input image is expected to be in the range of [0, 1].
Args:
input: Image to be adjusted in the shape of :math:`(*, N)`.
contrast_factor: Contrast adjust factor per element
in the batch. 0 generates a completely black image, 1 does not modify
the input image while any other non-negative number modifies the
contrast by this factor.
Return:
Adjusted image in the shape of :math:`(*, N)`.
Example:
>>> x = torch.ones(1, 1, 2, 2)
>>> adjust_contrast(x, 0.5)
tensor([[[[0.5000, 0.5000],
[0.5000, 0.5000]]]])
>>> x = torch.ones(2, 5, 3, 3)
>>> y = torch.tensor([0.65, 0.50])
>>> adjust_contrast(x, y).shape
torch.Size([2, 5, 3, 3])
"""
if not isinstance(input, torch.Tensor):
raise TypeError(f'Input type is not a torch.Tensor. Got {type(input)}')
if not isinstance(contrast_factor, (float, torch.Tensor)):
raise TypeError(
f'The factor should be either a float or torch.Tensor. Got {type(contrast_factor)}'
)
if isinstance(contrast_factor, float):
contrast_factor = torch.tensor([contrast_factor])
contrast_factor = contrast_factor.to(input.device)
if (contrast_factor < 0).any():
raise ValueError(
f'Contrast factor must be non-negative. Got {contrast_factor}')
for _ in input.shape[1:]:
contrast_factor = torch.unsqueeze(contrast_factor, dim=-1)
x_adjust: 'torch.Tensor' = input * contrast_factor
out: 'torch.Tensor' = torch.clamp(x_adjust, 0.0, 1.0)
return out
def adjust_hue_raw(input: 'torch.Tensor', hue_factor:
'Union[float, torch.Tensor]') ->torch.Tensor:
"""Adjust hue of an image. Expecting input to be in hsv format already."""
if not isinstance(input, torch.Tensor):
raise TypeError(f'Input type is not a torch.Tensor. Got {type(input)}')
if not isinstance(hue_factor, (float, torch.Tensor)):
raise TypeError(
f'The hue_factor should be a float number or torch.Tensor in the range between [-PI, PI]. Got {type(hue_factor)}'
)
if isinstance(hue_factor, float):
hue_factor = torch.as_tensor(hue_factor)
hue_factor = hue_factor.to(input.device)
for _ in input.shape[1:]:
hue_factor = torch.unsqueeze(hue_factor, dim=-1)
h, s, v = torch.chunk(input, chunks=3, dim=-3)
divisor: 'float' = 2 * pi
h_out: 'torch.Tensor' = torch.fmod(h + hue_factor, divisor)
out: 'torch.Tensor' = torch.cat([h_out, s, v], dim=-3)
return out
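# Hedged usage sketch (added for illustration, not part of the original source; assumes
# `math` and the module-level `pi` constant used above are available): shifting the hue
# channel of an HSV tensor by pi radians wraps around 2*pi via torch.fmod, while the S and V
# channels pass through untouched.
_hsv_example = torch.ones(1, 3, 2, 2)
_hsv_example[:, 0] = 1.5 * math.pi
_hue_shifted = adjust_hue_raw(_hsv_example, math.pi)
assert torch.allclose(_hue_shifted[:, 0], torch.full((1, 2, 2), 0.5 * math.pi), atol=1e-4)
assert torch.allclose(_hue_shifted[:, 1:], _hsv_example[:, 1:])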
def hsv_to_rgb(image: 'torch.Tensor') ->torch.Tensor:
"""Convert an image from HSV to RGB.
The H channel values are assumed to be in the range 0..2pi. S and V are in the range 0..1.
Args:
image: HSV image to be converted to RGB with shape of :math:`(*, 3, H, W)`.
Returns:
RGB version of the image with shape of :math:`(*, 3, H, W)`.
Example:
>>> input = torch.rand(2, 3, 4, 5)
>>> output = hsv_to_rgb(input) # 2x3x4x5
"""
if not isinstance(image, torch.Tensor):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(
type(image)))
if len(image.shape) < 3 or image.shape[-3] != 3:
raise ValueError('Input size must have a shape of (*, 3, H, W). Got {}'
.format(image.shape))
h: 'torch.Tensor' = image[..., 0, :, :] / (2 * math.pi)
s: 'torch.Tensor' = image[..., 1, :, :]
v: 'torch.Tensor' = image[..., 2, :, :]
hi: 'torch.Tensor' = torch.floor(h * 6) % 6
f: 'torch.Tensor' = h * 6 % 6 - hi
one: 'torch.Tensor' = torch.tensor(1.0).to(image.device)
p: 'torch.Tensor' = v * (one - s)
q: 'torch.Tensor' = v * (one - f * s)
t: 'torch.Tensor' = v * (one - (one - f) * s)
hi = hi.long()
indices: 'torch.Tensor' = torch.stack([hi, hi + 6, hi + 12], dim=-3)
out = torch.stack((v, q, p, p, t, v, t, v, v, q, p, p, p, p, t, v, v, q
), dim=-3)
out = torch.gather(out, -3, indices)
return out
def rgb_to_hsv(image: 'torch.Tensor', eps: 'float'=1e-06) ->torch.Tensor:
"""Convert an image from RGB to HSV.
.. image:: _static/img/rgb_to_hsv.png
The image data is assumed to be in the range of (0, 1).
Args:
image: RGB Image to be converted to HSV with shape of :math:`(*, 3, H, W)`.
eps: scalar to enforce numerical stability.
Returns:
HSV version of the image with shape of :math:`(*, 3, H, W)`.
The H channel values are in the range 0..2pi. S and V are in the range 0..1.
Example:
>>> input = torch.rand(2, 3, 4, 5)
>>> output = rgb_to_hsv(input) # 2x3x4x5
"""
if not isinstance(image, torch.Tensor):
raise TypeError('Input type is not a torch.Tensor. Got {}'.format(
type(image)))
if len(image.shape) < 3 or image.shape[-3] != 3:
raise ValueError('Input size must have a shape of (*, 3, H, W). Got {}'
.format(image.shape))
maxc, _ = image.max(-3)
maxc_mask = image == maxc.unsqueeze(-3)
_, max_indices = ((maxc_mask.cumsum(-3) == 1) & maxc_mask).max(-3)
minc: 'torch.Tensor' = image.min(-3)[0]
v: 'torch.Tensor' = maxc
deltac: 'torch.Tensor' = maxc - minc
s: 'torch.Tensor' = deltac / (v + eps)
deltac = torch.where(deltac == 0, torch.ones_like(deltac, device=deltac
.device, dtype=deltac.dtype), deltac)
maxc_tmp = maxc.unsqueeze(-3) - image
rc: 'torch.Tensor' = maxc_tmp[..., 0, :, :]
gc: 'torch.Tensor' = maxc_tmp[..., 1, :, :]
bc: 'torch.Tensor' = maxc_tmp[..., 2, :, :]
h = torch.stack([bc - gc, 2.0 * deltac + rc - bc, 4.0 * deltac + gc -
rc], dim=-3)
h = torch.gather(h, dim=-3, index=max_indices[..., None, :, :])
h = h.squeeze(-3)
h = h / deltac
h = h / 6.0 % 1.0
h = 2 * math.pi * h
return torch.stack([h, s, v], dim=-3)
def adjust_hue(input: 'torch.Tensor', hue_factor: 'Union[float, torch.Tensor]'
) ->torch.Tensor:
"""Adjust hue of an image.
.. image:: _static/img/adjust_hue.png
The input image is expected to be an RGB image in the range of [0, 1].
Args:
input: Image to be adjusted in the shape of :math:`(*, 3, H, W)`.
hue_factor: How much to shift the hue channel. Should be in [-PI, PI]. PI
and -PI give complete reversal of hue channel in HSV space in positive and negative
direction respectively. 0 means no shift. Therefore, both -PI and PI will give an
image with complementary colors while 0 gives the original image.
Return:
Adjusted image in the shape of :math:`(*, 3, H, W)`.
Example:
>>> x = torch.ones(1, 3, 2, 2)
>>> adjust_hue(x, 3.141516).shape
torch.Size([1, 3, 2, 2])
>>> x = torch.ones(2, 3, 3, 3)
>>> y = torch.ones(2) * 3.141516
>>> adjust_hue(x, y).shape
torch.Size([2, 3, 3, 3])
"""
x_hsv: 'torch.Tensor' = rgb_to_hsv(input)
x_adjusted: 'torch.Tensor' = adjust_hue_raw(x_hsv, hue_factor)
out: 'torch.Tensor' = hsv_to_rgb(x_adjusted)
return out
def adjust_saturation_raw(input: 'torch.Tensor', saturation_factor:
'Union[float, torch.Tensor]') ->torch.Tensor:
"""Adjust color saturation of an image. Expecting input to be in hsv format already."""
if not isinstance(input, torch.Tensor):
raise TypeError(f'Input type is not a torch.Tensor. Got {type(input)}')
if not isinstance(saturation_factor, (float, torch.Tensor)):
raise TypeError(
f'The saturation_factor should be a float number or torch.Tensor. Got {type(saturation_factor)}'
)
if isinstance(saturation_factor, float):
saturation_factor = torch.as_tensor(saturation_factor)
saturation_factor = saturation_factor.to(input.device)
for _ in input.shape[1:]:
saturation_factor = torch.unsqueeze(saturation_factor, dim=-1)
h, s, v = torch.chunk(input, chunks=3, dim=-3)
s_out: 'torch.Tensor' = torch.clamp(s * saturation_factor, min=0, max=1)
out: 'torch.Tensor' = torch.cat([h, s_out, v], dim=-3)
return out
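# Hedged usage sketch (added for illustration, not part of the original source): the S
# channel is multiplied by the factor and clamped to [0, 1]; H and V are returned as-is.
_hsv_sat = torch.full((1, 3, 2, 2), 0.6)
_sat_out = adjust_saturation_raw(_hsv_sat, 2.0)
assert torch.allclose(_sat_out[:, 1], torch.ones(1, 2, 2))          # 0.6 * 2 clamps to 1.0
assert torch.allclose(_sat_out[:, 0], torch.full((1, 2, 2), 0.6))   # hue untouched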
def adjust_saturation(input: 'torch.Tensor', saturation_factor:
'Union[float, torch.Tensor]') ->torch.Tensor:
"""Adjust color saturation of an image.
.. image:: _static/img/adjust_saturation.png
The input image is expected to be an RGB image in the range of [0, 1].
Args:
input: Image/Tensor to be adjusted in the shape of :math:`(*, 3, H, W)`.
saturation_factor: How much to adjust the saturation. 0 will give a black
and white image, 1 will give the original image while 2 will enhance the saturation by a factor of 2.
Return:
Adjusted image in the shape of :math:`(*, 3, H, W)`.
Example:
>>> x = torch.ones(1, 3, 3, 3)
>>> adjust_saturation(x, 2.).shape
torch.Size([1, 3, 3, 3])
>>> x = torch.ones(2, 3, 3, 3)
>>> y = torch.tensor([1., 2.])
>>> adjust_saturation(x, y).shape
torch.Size([2, 3, 3, 3])
"""
x_hsv: 'torch.Tensor' = rgb_to_hsv(input)
x_adjusted: 'torch.Tensor' = adjust_saturation_raw(x_hsv, saturation_factor
)
out: 'torch.Tensor' = hsv_to_rgb(x_adjusted)
return out
def _extract_tensor_patchesnd(input: 'torch.Tensor', window_sizes:
'Tuple[int, ...]', strides: 'Tuple[int, ...]') ->torch.Tensor:
batch_size, num_channels = input.size()[:2]
dims = range(2, input.dim())
for dim, patch_size, stride in zip(dims, window_sizes, strides):
input = input.unfold(dim, patch_size, stride)
input = input.permute(0, *dims, 1, *[(dim + len(dims)) for dim in dims]
).contiguous()
return input.view(batch_size, -1, num_channels, *window_sizes)
def extract_tensor_patches(input: 'torch.Tensor', window_size:
'Union[int, Tuple[int, int]]', stride: 'Union[int, Tuple[int, int]]'=1,
padding: 'Union[int, Tuple[int, int]]'=0) ->torch.Tensor:
"""Function that extract patches from tensors and stack them.
See :class:`~kornia.contrib.ExtractTensorPatches` for details.
"""
if not torch.is_tensor(input):
raise TypeError('Input input type is not a torch.Tensor. Got {}'.
format(type(input)))
if not len(input.shape) == 4:
raise ValueError('Invalid input shape, we expect BxCxHxW. Got: {}'.
format(input.shape))
if padding:
pad_vert, pad_horz = _pair(padding)
input = F.pad(input, [pad_horz, pad_horz, pad_vert, pad_vert])
return _extract_tensor_patchesnd(input, _pair(window_size), _pair(stride))
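# Hedged usage sketch (added for illustration, not part of the original source): a 4x4
# single-channel image split with a 2x2 window and stride 2 yields four non-overlapping
# patches stacked along dim=1, i.e. a tensor of shape (B, num_patches, C, h, w).
_patch_input = torch.arange(16.0).view(1, 1, 4, 4)
_patches = extract_tensor_patches(_patch_input, window_size=2, stride=2)
assert _patches.shape == (1, 4, 1, 2, 2)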
class _BasicAugmentationBase(nn.Module):
"""_BasicAugmentationBase base class for customized augmentation implementations.
Plain augmentation base class without the functionality of transformation matrix calculations.
By default, the random computations happen on the CPU with ``torch.get_default_dtype()``.
To change this behaviour, please use ``set_rng_device_and_dtype``.
Args:
p (float): probability for applying an augmentation. This param controls the augmentation
probabilities element-wise.
p_batch (float): probability for applying an augmentation to a batch. This param controls the augmentation
probabilities batch-wise.
same_on_batch (bool): apply the same transformation across the batch. Default: False.
keepdim (bool): whether to keep the output shape the same as input (True) or broadcast it
to the batch form (False). Default: False.
"""
def __init__(self, p: 'float'=0.5, p_batch: 'float'=1.0, same_on_batch:
'bool'=False, keepdim: 'bool'=False) ->None:
super(_BasicAugmentationBase, self).__init__()
self.p = p
self.p_batch = p_batch
self.same_on_batch = same_on_batch
self.keepdim = keepdim
self._params: 'Dict[str, torch.Tensor]' = {}
if p != 0.0 and p != 1.0:
self._p_gen = Bernoulli(self.p)
if p_batch != 0.0 and p_batch != 1.0:
self._p_batch_gen = Bernoulli(self.p_batch)
self.set_rng_device_and_dtype(torch.device('cpu'), torch.
get_default_dtype())
def __repr__(self) ->str:
return (
f'p={self.p}, p_batch={self.p_batch}, same_on_batch={self.same_on_batch}'
)
def __unpack_input__(self, input: 'torch.Tensor') ->torch.Tensor:
return input
def __check_batching__(self, input:
'Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]'):
"""Check if a transformation matrix is returned,
it has to be in the same batching mode as output."""
raise NotImplementedError
def transform_tensor(self, input: 'torch.Tensor') ->torch.Tensor:
"""Standardize input tensors."""
raise NotImplementedError
def generate_parameters(self, batch_shape: 'torch.Size') ->Dict[str,
torch.Tensor]:
return {}
def apply_transform(self, input: 'torch.Tensor', params:
'Dict[str, torch.Tensor]') ->torch.Tensor:
raise NotImplementedError
def set_rng_device_and_dtype(self, device: 'torch.device', dtype:
'torch.dtype') ->None:
"""Change the random generation device and dtype.
Note:
The generated random numbers are not reproducible across different devices and dtypes.
"""
self.device = device
self.dtype = dtype
def __batch_prob_generator__(self, batch_shape: 'torch.Size', p:
'float', p_batch: 'float', same_on_batch: 'bool') ->torch.Tensor:
batch_prob: 'torch.Tensor'
if p_batch == 1:
batch_prob = torch.tensor([True])
elif p_batch == 0:
batch_prob = torch.tensor([False])
else:
batch_prob = _adapted_sampling((1,), self._p_batch_gen,
same_on_batch).bool()
if batch_prob.sum().item() == 1:
elem_prob: 'torch.Tensor'
if p == 1:
elem_prob = torch.tensor([True] * batch_shape[0])
elif p == 0:
elem_prob = torch.tensor([False] * batch_shape[0])
else:
elem_prob = _adapted_sampling((batch_shape[0],), self.
_p_gen, same_on_batch).bool()
batch_prob = batch_prob * elem_prob
else:
batch_prob = batch_prob.repeat(batch_shape[0])
return batch_prob
def forward_parameters(self, batch_shape):
to_apply = self.__batch_prob_generator__(batch_shape, self.p, self.
p_batch, self.same_on_batch)
_params = self.generate_parameters(torch.Size((int(to_apply.sum().
item()), *batch_shape[1:])))
if _params is None:
_params = {}
_params['batch_prob'] = to_apply
return _params
def apply_func(self, input: 'torch.Tensor', params:
'Dict[str, torch.Tensor]') ->Union[torch.Tensor, Tuple[torch.Tensor,
torch.Tensor]]:
input = self.transform_tensor(input)
return self.apply_transform(input, params)
def forward(self, input: 'torch.Tensor', params:
'Optional[Dict[str, torch.Tensor]]'=None) ->Union[torch.Tensor,
Tuple[torch.Tensor, torch.Tensor]]:
in_tensor = self.__unpack_input__(input)
self.__check_batching__(input)
ori_shape = in_tensor.shape
in_tensor = self.transform_tensor(in_tensor)
batch_shape = in_tensor.shape
if params is None:
params = self.forward_parameters(batch_shape)
self._params = params
output = self.apply_func(input, self._params)
return _transform_output_shape(output, ori_shape
) if self.keepdim else output
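# Hedged sketch of the probability logic described in the class docstring above (a standalone
# illustration, not the class itself): the batch-level Bernoulli(p_batch) draw gates whether
# any element is considered, and a per-element Bernoulli(p) mask then selects which samples
# in the batch are actually augmented.
from torch.distributions import Bernoulli as _Bernoulli
_batch_keep = _Bernoulli(1.0).sample((1,)).bool()    # p_batch = 1.0 -> batch always eligible
_elem_keep = _Bernoulli(0.5).sample((8,)).bool()     # p = 0.5 -> roughly half of 8 samples
_to_apply = _elem_keep if bool(_batch_keep.item()) else torch.zeros(8, dtype=torch.bool)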
class _AugmentationBase(_BasicAugmentationBase):
"""_AugmentationBase base class for customized augmentation implementations.
Advanced augmentation base class with the functionality of transformation matrix calculations.
Args:
p (float): probability for applying an augmentation. This param controls the augmentation probabilities
element-wise for a batch.
p_batch (float): probability for applying an augmentation to a batch. This param controls the augmentation
probabilities batch-wise.
return_transform (bool): if ``True`` return the matrix describing the geometric transformation applied to each
input tensor. If ``False`` and the input is a tuple the applied transformation
won't be concatenated.
same_on_batch (bool): apply the same transformation across the batch. Default: False.
keepdim (bool): whether to keep the output shape the same as input (True) or broadcast it
to the batch form (False). Default: False.
"""
def __init__(self, return_transform: 'bool'=None, same_on_batch: 'bool'
=False, p: 'float'=0.5, p_batch: 'float'=1.0, keepdim: 'bool'=False
) ->None:
super(_AugmentationBase, self).__init__(p, p_batch=p_batch,
same_on_batch=same_on_batch, keepdim=keepdim)
self.p = p
self.p_batch = p_batch
self.return_transform = return_transform
def __repr__(self) ->str:
return super().__repr__(
) + f', return_transform={self.return_transform}'
def identity_matrix(self, input: 'torch.Tensor') ->torch.Tensor:
raise NotImplementedError
def compute_transformation(self, input: 'torch.Tensor', params:
'Dict[str, torch.Tensor]') ->torch.Tensor:
raise NotImplementedError
def apply_transform(self, input: 'torch.Tensor', params:
'Dict[str, torch.Tensor]', transform: 'Optional[torch.Tensor]'=None
) ->torch.Tensor:
raise NotImplementedError
def __unpack_input__(self, input:
'Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]') ->Tuple[
torch.Tensor, Optional[torch.Tensor]]:
if isinstance(input, tuple):
in_tensor = input[0]
in_transformation = input[1]
return in_tensor, in_transformation
in_tensor = input
return in_tensor, None
def apply_func(self, in_tensor: 'torch.Tensor', in_transform:
'Optional[torch.Tensor]', params: 'Dict[str, torch.Tensor]',
return_transform: 'bool'=False) ->Union[torch.Tensor, Tuple[torch.
Tensor, torch.Tensor]]:
to_apply = params['batch_prob']
if torch.sum(to_apply) == 0:
output = in_tensor
trans_matrix = self.identity_matrix(in_tensor)
elif torch.sum(to_apply) == len(to_apply):
trans_matrix = self.compute_transformation(in_tensor, params)
output = self.apply_transform(in_tensor, params, trans_matrix)
else:
output = in_tensor.clone()
trans_matrix = self.identity_matrix(in_tensor)
trans_matrix[to_apply] = self.compute_transformation(in_tensor[
to_apply], params)
output[to_apply] = self.apply_transform(in_tensor[to_apply],
params, trans_matrix[to_apply])
self._transform_matrix = trans_matrix
if return_transform:
out_transformation = (trans_matrix if in_transform is None else
trans_matrix @ in_transform)
return output, out_transformation
if in_transform is not None:
return output, in_transform
return output
def forward(self, input:
'Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]', params:
'Optional[Dict[str, torch.Tensor]]'=None, return_transform:
'Optional[bool]'=None) ->Union[torch.Tensor, Tuple[torch.Tensor,
torch.Tensor]]:
in_tensor, in_transform = self.__unpack_input__(input)
self.__check_batching__(input)
ori_shape = in_tensor.shape
in_tensor = self.transform_tensor(in_tensor)
batch_shape = in_tensor.shape
if return_transform is None:
return_transform = self.return_transform
return_transform = cast(bool, return_transform)
if params is None:
params = self.forward_parameters(batch_shape)
if 'batch_prob' not in params:
params['batch_prob'] = torch.tensor([True] * batch_shape[0])
warnings.warn(
'`batch_prob` is not found in params. Will assume applying on all data.'
)
self._params = params
output = self.apply_func(in_tensor, in_transform, self._params,
return_transform)
return _transform_output_shape(output, ori_shape
) if self.keepdim else output
class AugmentationBase2D(_AugmentationBase):
"""AugmentationBase2D base class for customized augmentation implementations.
For any augmentation, the implementations of "generate_parameters" and "apply_transform" are required while the
"compute_transformation" is only required when passing "return_transform" as True.
Args:
p (float): probability for applying an augmentation. This param controls the augmentation probabilities
element-wise for a batch.
p_batch (float): probability for applying an augmentation to a batch. This param controls the augmentation
probabilities batch-wise.
return_transform (bool): if ``True`` return the matrix describing the geometric transformation applied to each
input tensor. If ``False`` and the input is a tuple the applied transformation
won't be concatenated.
same_on_batch (bool): apply the same transformation across the batch. Default: False.
keepdim (bool): whether to keep the output shape the same as input (True) or broadcast it
to the batch form (False). Default: False.
"""
def __check_batching__(self, input:
'Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]'):
if isinstance(input, tuple):
inp, mat = input
if len(inp.shape) == 4:
assert len(mat.shape
) == 3, 'Input tensor is in batch mode but transformation matrix is not'
assert mat.shape[0] == inp.shape[0
], f'In batch dimension, input has {inp.shape[0]} but transformation matrix has {mat.shape[0]}'
elif len(inp.shape) == 3 or len(inp.shape) == 2:
assert len(mat.shape
) == 2, 'Input tensor is in non-batch mode but transformation matrix is not'
else:
raise ValueError(
f'Unrecognized output shape. Expected 2, 3, or 4, got {len(inp.shape)}'
)
def transform_tensor(self, input: 'torch.Tensor') ->torch.Tensor:
"""Convert any incoming (H, W), (C, H, W) and (B, C, H, W) into (B, C, H, W)."""
_validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.
float32, torch.float64])
return _transform_input(input)
def identity_matrix(self, input) ->torch.Tensor:
"""Return 3x3 identity matrix."""
return kornia.eye_like(3, input)
class IntensityAugmentationBase2D(AugmentationBase2D):
"""IntensityAugmentationBase2D base class for customized intensity augmentation implementations.
For any augmentation, the implementations of "generate_parameters" and "apply_transform" are required while the
"compute_transformation" is only required when passing "return_transform" as True.
Args:
p (float): probability for applying an augmentation. This param controls the augmentation probabilities
element-wise for a batch.
p_batch (float): probability for applying an augmentation to a batch. This param controls the augmentation
probabilities batch-wise.
return_transform (bool): if ``True`` return the matrix describing the geometric transformation applied to each
input tensor. If ``False`` and the input is a tuple the applied transformation
won't be concatenated.
same_on_batch (bool): apply the same transformation across the batch. Default: False.
keepdim (bool): whether to keep the output shape the same as input (True) or broadcast it
to the batch form (False). Default: False.
"""
def compute_transformation(self, input: 'torch.Tensor', params:
'Dict[str, torch.Tensor]') ->torch.Tensor:
return self.identity_matrix(input)
class ParamItem(NamedTuple):
name: 'str'
data: 'Union[dict, list]'
class ImageSequential(nn.Sequential):
"""Sequential for creating kornia image processing pipeline.
Args:
*args : a list of kornia augmentation and image operation modules.
same_on_batch: apply the same transformation across the batch.
If None, it will not overwrite the function-wise settings.
return_transform: if ``True`` return the matrix describing the transformation
applied to each. If None, it will not overwrite the function-wise settings.
keepdim: whether to keep the output shape the same as input (True) or broadcast it
to the batch form (False). If None, it will not overwrite the function-wise settings.
random_apply: randomly select a sublist (order agnostic) of args to
apply transformation.
If int, a fixed number of transformations will be selected.
If (a,), x number of transformations (a <= x <= len(args)) will be selected.
If (a, b), x number of transformations (a <= x <= b) will be selected.
If True, the whole list of args will be processed as a sequence in a random order.
If False, the whole list of args will be processed as a sequence in original order.
Returns:
Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: the tensor (, and the transformation matrix)
has been sequentially modified by the args.
Examples:
>>> import kornia
>>> input = torch.randn(2, 3, 5, 6)
>>> aug_list = ImageSequential(
... kornia.color.BgrToRgb(),
... kornia.augmentation.ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.0),
... kornia.filters.MedianBlur((3, 3)),
... kornia.augmentation.RandomAffine(360, p=1.0),
... kornia.enhance.Invert(),
... return_transform=True,
... same_on_batch=True,
... random_apply=10,
... )
>>> out = aug_list(input)
>>> out[0].shape, out[1].shape
(torch.Size([2, 3, 5, 6]), torch.Size([2, 3, 3]))
Reproduce with provided params.
>>> out2 = aug_list(input, params=aug_list._params)
>>> torch.equal(out[0], out2[0]), torch.equal(out[1], out2[1])
(True, True)
Note:
Transformation matrix returned only considers the transformation applied in ``kornia.augmentation`` module.
Those transformations in ``kornia.geometry`` will not be taken into account.
"""
def __init__(self, *args: nn.Module, same_on_batch: Optional[bool]=None,
return_transform: Optional[bool]=None, keepdim: Optional[bool]=None,
random_apply: Union[int, bool, Tuple[int, int]]=False) ->None:
self.same_on_batch = same_on_batch
self.return_transform = return_transform
self.keepdim = keepdim
_args = OrderedDict()
for idx, arg in enumerate(args):
if not isinstance(arg, nn.Module):
raise NotImplementedError(
f'Only nn.Module is supported at this moment. Got {arg}.')
if isinstance(arg, _AugmentationBase):
if same_on_batch is not None:
arg.same_on_batch = same_on_batch
if return_transform is not None:
arg.return_transform = return_transform
if keepdim is not None:
arg.keepdim = keepdim
_args.update({f'{arg.__class__.__name__}_{idx}': arg})
super(ImageSequential, self).__init__(_args)
self._params: 'List[Any]' = []
self.random_apply: 'Union[Tuple[int, int], bool]'
if random_apply:
if isinstance(random_apply, (bool,)) and random_apply is True:
self.random_apply = len(args), len(args) + 1
elif isinstance(random_apply, (int,)):
self.random_apply = random_apply, random_apply + 1
elif isinstance(random_apply, (tuple,)) and len(random_apply
) == 2 and isinstance(random_apply[0], (int,)) and isinstance(
random_apply[1], (int,)):
self.random_apply = random_apply[0], random_apply[1] + 1
elif isinstance(random_apply, (tuple,)) and len(random_apply
) == 1 and isinstance(random_apply[0], (int,)):
self.random_apply = random_apply[0], len(args) + 1
else:
raise ValueError(
f'Non-readable random_apply. Got {random_apply}.')
assert isinstance(self.random_apply, (tuple,)) and len(self.
random_apply) == 2 and isinstance(self.random_apply[0], (int,)
) and isinstance(self.random_apply[1], (int,)
), f'Expect a tuple of (int, int). Got {self.random_apply}.'
else:
self.random_apply = False
def _get_child_sequence(self) ->Iterator[Tuple[str, nn.Module]]:
if self.random_apply:
num_samples = int(torch.randint(*self.random_apply, (1,)).item())
indices = torch.multinomial(torch.ones((len(self),)),
num_samples, replacement=True if num_samples > len(self) else
False)
return self._get_children_by_indices(indices)
return self.named_children()
def _get_children_by_indices(self, indices: 'torch.Tensor') ->Iterator[
Tuple[str, nn.Module]]:
modules = list(self.named_children())
for idx in indices:
yield modules[idx]
def _get_children_by_module_names(self, names: 'List[str]') ->Iterator[
Tuple[str, nn.Module]]:
modules = list(self.named_children())
for name in names:
yield modules[list(dict(self.named_children()).keys()).index(name)]
def get_forward_sequence(self, params: 'Optional[List[ParamItem]]'=None
) ->Iterator[Tuple[str, nn.Module]]:
if params is None:
named_modules = self._get_child_sequence()
else:
named_modules = self._get_children_by_module_names([p.name for
p in params])
return named_modules
def apply_to_input(self, input:
'Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]',
module_name: 'str', module: 'Optional[nn.Module]'=None, param:
'Optional[ParamItem]'=None) ->Union[torch.Tensor, Tuple[torch.
Tensor, torch.Tensor]]:
if module is None:
module = self.get_submodule(module_name)
if param is not None:
assert module_name == param.name
_param = param.data
else:
_param = None
if isinstance(module, (_AugmentationBase, ImageSequential)
) and _param is None:
input = module(input)
self._params.append(ParamItem(module_name, module._params))
elif isinstance(module, (_AugmentationBase, ImageSequential)
) and _param is not None:
input = module(input, params=_param)
self._params.append(ParamItem(module_name, _param))
else:
assert _param == {
} or _param is None, f'Non-augmentation operation {module_name} requires empty parameters. Got {module}.'
if isinstance(input, (tuple, list)):
input = module(input[0]), input[1]
else:
input = module(input)
self._params.append(ParamItem(module_name, {}))
return input
def forward(self, input:
'Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]', params:
'Optional[List[ParamItem]]'=None) ->Union[torch.Tensor, Tuple[torch
.Tensor, torch.Tensor]]:
self._params = []
named_modules = self.get_forward_sequence(params)
params = [] if params is None else params
for (name, module), param in zip_longest(named_modules, params):
input = self.apply_to_input(input, name, module, param=param)
return input
class ColorJitter(IntensityAugmentationBase2D):
"""Applies a random transformation to the brightness, contrast, saturation and hue of a tensor image.
.. image:: _static/img/ColorJitter.png
Args:
p: probability of applying the transformation.
brightness: The brightness factor to apply.
contrast: The contrast factor to apply.
saturation: The saturation factor to apply.
hue: The hue factor to apply.
return_transform: if ``True`` return the matrix describing the transformation applied to each
input tensor. If ``False`` and the input is a tuple the applied transformation
won't be concatenated.
same_on_batch: apply the same transformation across the batch.
keepdim: whether to keep the output shape the same as input (True) or broadcast it
to the batch form (False).
Shape:
- Input: :math:`(C, H, W)` or :math:`(B, C, H, W)`, Optional: :math:`(B, 3, 3)`
- Output: :math:`(B, C, H, W)`
Note:
Input tensor must be float and normalized into [0, 1] for the best differentiability support.
Additionally, this function accepts another transformation tensor (:math:`(B, 3, 3)`), then the
applied transformation will be merged into the input transformation tensor and returned.
Examples:
>>> rng = torch.manual_seed(0)
>>> inputs = torch.ones(1, 3, 3, 3)
>>> aug = ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.)
>>> aug(inputs)
tensor([[[[0.9993, 0.9993, 0.9993],
[0.9993, 0.9993, 0.9993],
[0.9993, 0.9993, 0.9993]],
<BLANKLINE>
[[0.9993, 0.9993, 0.9993],
[0.9993, 0.9993, 0.9993],
[0.9993, 0.9993, 0.9993]],
<BLANKLINE>
[[0.9993, 0.9993, 0.9993],
[0.9993, 0.9993, 0.9993],
[0.9993, 0.9993, 0.9993]]]])
"""
def __init__(self, brightness:
'Union[torch.Tensor, float, Tuple[float, float], List[float]]'=0.0,
contrast:
'Union[torch.Tensor, float, Tuple[float, float], List[float]]'=0.0,
saturation:
'Union[torch.Tensor, float, Tuple[float, float], List[float]]'=0.0,
hue: 'Union[torch.Tensor, float, Tuple[float, float], List[float]]'
=0.0, return_transform: 'bool'=False, same_on_batch: 'bool'=False,
p: 'float'=1.0, keepdim: 'bool'=False) ->None:
super(ColorJitter, self).__init__(p=p, return_transform=
return_transform, same_on_batch=same_on_batch, keepdim=keepdim)
self._device, self._dtype = _extract_device_dtype([brightness,
contrast, hue, saturation])
self.brightness = brightness
self.contrast = contrast
self.saturation = saturation
self.hue = hue
def __repr__(self) ->str:
repr = (
f'brightness={self.brightness}, contrast={self.contrast}, saturation={self.saturation}, hue={self.hue}'
)
return self.__class__.__name__ + f'({repr}, {super().__repr__()})'
def generate_parameters(self, batch_shape: 'torch.Size') ->Dict[str,
torch.Tensor]:
brightness: 'torch.Tensor' = _range_bound(self.brightness,
'brightness', center=1.0, bounds=(0, 2), device=self._device,
dtype=self._dtype)
contrast: 'torch.Tensor' = _range_bound(self.contrast, 'contrast',
center=1.0, device=self._device, dtype=self._dtype)
saturation: 'torch.Tensor' = _range_bound(self.saturation,
'saturation', center=1.0, device=self._device, dtype=self._dtype)
hue: 'torch.Tensor' = _range_bound(self.hue, 'hue', bounds=(-0.5,
0.5), device=self._device, dtype=self._dtype)
return rg.random_color_jitter_generator(batch_shape[0], brightness,
contrast, saturation, hue, self.same_on_batch, self.device,
self.dtype)
def apply_transform(self, input: 'torch.Tensor', params:
'Dict[str, torch.Tensor]', transform: 'Optional[torch.Tensor]'=None
) ->torch.Tensor:
transforms = [lambda img: adjust_brightness(img, params[
'brightness_factor'] - 1), lambda img: adjust_contrast(img,
params['contrast_factor']), lambda img: adjust_saturation(img,
params['saturation_factor']), lambda img: adjust_hue(img,
params['hue_factor'] * 2 * pi)]
jittered = input
for idx in params['order'].tolist():
t = transforms[idx]
jittered = t(jittered)
return jittered
class PatchSequentialNew(ImageSequential):
"""Container for performing patch-level image processing.
.. image:: https://kornia-tutorials.readthedocs.io/en/latest/_images/data_patch_sequential_5_1.png
PatchSequential breaks input images into patches by a given grid size, which will be assembled back
afterwards. Different image processing and augmentation methods will be performed on each patch region.
Args:
*args: a list of processing modules.
grid_size: controls the grid board separation.
padding: same or valid padding. If same padding, it will pad to include all pixels if the input
tensor is not divisible by grid_size. If valid padding, the redundant border will be removed.
same_on_batch: apply the same transformation across the batch.
If None, it will not overwrite the function-wise settings.
keepdim: whether to keep the output shape the same as input (True) or broadcast it
to the batch form (False). If None, it will not overwrite the function-wise settings.
patchwise_apply: whether the image processing args are applied patch-wise.
if ``True``, the number of args must be equal to grid number.
if ``False``, the image processing args will be applied as a sequence to all patches. Default: False.
random_apply: randomly select a sublist (order agnostic) of args to
apply transformation.
If ``int`` (batchwise mode only), a fixed number of transformations will be selected.
If ``(a,)`` (batchwise mode only), x number of transformations (a <= x <= len(args)) will be selected.
If ``(a, b)`` (batchwise mode only), x number of transformations (a <= x <= b) will be selected.
If ``True``, the whole list of args will be processed in a random order.
If ``False``, the whole list of args will be processed in original order.
Return:
List[Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]]: the tensor (, and the transformation matrix)
has been sequentially modified by the args.
Examples:
>>> import kornia.augmentation as K
>>> input = torch.randn(2, 3, 224, 224)
>>> seq = PatchSequential(
... ImageSequential(
... K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=0.5),
... K.RandomPerspective(0.2, p=0.5),
... K.RandomSolarize(0.1, 0.1, p=0.5),
... ),
... K.RandomAffine(360, p=1.0),
... ImageSequential(
... K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=0.5),
... K.RandomPerspective(0.2, p=0.5),
... K.RandomSolarize(0.1, 0.1, p=0.5),
... ),
... K.RandomSolarize(0.1, 0.1, p=0.1),
... grid_size=(2,2),
... patchwise_apply=False,
... same_on_batch=True,
... random_apply=True,
... )
>>> out = seq(input)
>>> out.shape
torch.Size([2, 3, 224, 224])
>>> out1 = seq(input, seq._params)
>>> torch.equal(out, out1)
True
"""
def __init__(self, *args: nn.Module, grid_size: Tuple[int, int]=(4, 4),
padding: str='same', same_on_batch: Optional[bool]=None, keepdim:
Optional[bool]=None, patchwise_apply: bool=False, random_apply:
Union[int, bool, Tuple[int, int]]=False) ->None:
_random_apply: 'Optional[Union[int, Tuple[int, int]]]'
if patchwise_apply and random_apply is True:
_random_apply = grid_size[0] * grid_size[1], grid_size[0
] * grid_size[1]
elif patchwise_apply and random_apply is False:
assert len(args) == grid_size[0] * grid_size[1
], f'The number of processing modules must be equal to the grid size. Got {len(args)} and {grid_size[0] * grid_size[1]}.'
_random_apply = random_apply
elif patchwise_apply and isinstance(random_apply, (int, tuple)):
raise ValueError(
f'Only boolean value allowed when `patchwise_apply` is set to True. Got {random_apply}.'
)
else:
_random_apply = random_apply
super(PatchSequentialNew, self).__init__(*args, same_on_batch=
same_on_batch, return_transform=False, keepdim=keepdim,
random_apply=_random_apply)
assert padding in ['same', 'valid'
], f'`padding` must be either `same` or `valid`. Got {padding}.'
self.grid_size = grid_size
self.padding = padding
self.patchwise_apply = patchwise_apply
def is_intensity_only(self) ->bool:
"""Check if all transformations are intensity-based.
Note: patch processing would break the continuity of labels (e.g. bounding boxes, masks).
"""
for arg in self.children():
if isinstance(arg, (ImageSequential,)):
for _arg in arg.children():
if not isinstance(_arg, IntensityAugmentationBase2D):
return False
elif not isinstance(_arg, IntensityAugmentationBase2D):
return False
return True
def __repeat_param_across_patches__(self, param: 'torch.Tensor',
patch_num: 'int') ->torch.Tensor:
"""Repeat parameters across patches.
The input is shaped as (B, ...), while the output is (B * patch_num, ...), which
guarantees that the same transformation happens for each patch index.
(B1, B2, ..., Bn) => (B1, ... Bn, B1, ..., Bn, ..., B1, ..., Bn)
| pt_size | | pt_size | ..., | pt_size |
"""
repeated = torch.cat([param] * patch_num, dim=0)
return repeated
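# Hedged worked example of the repetition above (illustrative comment, not original code):
# for a per-sample parameter tensor [a, b] and patch_num=3, torch.cat produces
# [a, b, a, b, a, b], so every patch index reuses the same per-sample draw.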
def compute_padding(self, input: 'torch.Tensor', padding: 'str',
grid_size: 'Optional[Tuple[int, int]]'=None) ->Tuple[int, int, int, int
]:
if grid_size is None:
grid_size = self.grid_size
if padding == 'valid':
ph, pw = input.size(-2) // grid_size[0], input.size(-1
) // grid_size[1]
return -pw // 2, pw // 2 - pw, -ph // 2, ph // 2 - ph
elif padding == 'same':
ph = input.size(-2) - input.size(-2) // grid_size[0] * grid_size[0]
pw = input.size(-1) - input.size(-1) // grid_size[1] * grid_size[1]
return pw // 2, pw - pw // 2, ph // 2, ph - ph // 2
else:
raise NotImplementedError(
f"Expect `padding` as either 'valid' or 'same'. Got {padding}."
)
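# Hedged worked example of the 'same' branch above (illustrative comment, not original
# code): for an input height of 225 and grid_size[0] == 4, ph = 225 - (225 // 4) * 4 = 1,
# so the image is padded by 0 rows on top and 1 row at the bottom to become divisible by 4.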
def extract_patches(self, input: 'torch.Tensor', grid_size:
'Optional[Tuple[int, int]]'=None, pad:
'Optional[Tuple[int, int, int, int]]'=None) ->torch.Tensor:
"""Extract patches from tensor.
Example:
>>> import kornia.augmentation as K
>>> pas = PatchSequential(K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.0))
>>> pas.extract_patches(torch.arange(16).view(1, 1, 4, 4), grid_size=(2, 2))
tensor([[[[[ 0, 1],
[ 4, 5]]],
<BLANKLINE>
<BLANKLINE>
[[[ 2, 3],
[ 6, 7]]],
<BLANKLINE>
<BLANKLINE>
[[[ 8, 9],
[12, 13]]],
<BLANKLINE>
<BLANKLINE>
[[[10, 11],
[14, 15]]]]])
>>> pas.extract_patches(torch.arange(54).view(1, 1, 6, 9), grid_size=(2, 2), pad=(-1, -1, -2, -2))
tensor([[[[[19, 20, 21]]],
<BLANKLINE>
<BLANKLINE>
[[[22, 23, 24]]],
<BLANKLINE>
<BLANKLINE>
[[[28, 29, 30]]],
<BLANKLINE>
<BLANKLINE>
[[[31, 32, 33]]]]])
"""
if pad is not None:
input = torch.nn.functional.pad(input, list(pad))
if grid_size is None:
grid_size = self.grid_size
window_size = input.size(-2) // grid_size[-2], input.size(-1
) // grid_size[-1]
stride = window_size
return extract_tensor_patches(input, window_size, stride)
def restore_from_patches(self, patches: 'torch.Tensor', grid_size:
'Tuple[int, int]'=(4, 4), pad:
'Optional[Tuple[int, int, int, int]]'=None) ->torch.Tensor:
"""Restore input from patches.
Example:
>>> import kornia.augmentation as K
>>> pas = PatchSequential(K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.0))
>>> out = pas.extract_patches(torch.arange(16).view(1, 1, 4, 4), grid_size=(2, 2))
>>> pas.restore_from_patches(out, grid_size=(2, 2))
tensor([[[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]]]])
"""
if grid_size is None:
grid_size = self.grid_size
patches_tensor = patches.view(-1, grid_size[0], grid_size[1], *
patches.shape[-3:])
restored_tensor = torch.cat(torch.chunk(patches_tensor, grid_size[0
], dim=1), -2).squeeze(1)
restored_tensor = torch.cat(torch.chunk(restored_tensor, grid_size[
1], dim=1), -1).squeeze(1)
if pad is not None:
restored_tensor = torch.nn.functional.pad(restored_tensor, [(-i
) for i in pad])
return restored_tensor
def forward_patchwise(self, input: 'torch.Tensor', params:
'Optional[List[List[ParamItem]]]'=None) ->torch.Tensor:
if params is None:
params = [[]] * input.size(1)
auglist = [self.get_forward_sequence() for _ in range(input.
size(1))]
else:
auglist = [self.get_forward_sequence(p) for p in params]
assert input.size(0) == len(auglist) == len(params)
out = []
self._params = []
for inp, proc, param in zip(input, auglist, params):
o = []
p = []
for inp_pat, (proc_name, proc_pat), _param in zip_longest(inp,
proc, param):
if isinstance(proc_pat, (_AugmentationBase, ImageSequential)):
o.append(proc_pat(inp_pat[None], _param.data if _param
is not None else None))
p.append(ParamItem(proc_name, proc_pat._params))
else:
o.append(proc_pat(inp_pat[None]))
p.append(ParamItem(proc_name, {}))
out.append(torch.cat(o, dim=0))
self._params.append(p)
input = torch.stack(out, dim=0)
return input
def forward_batchwise(self, input: 'torch.Tensor', params:
'Optional[List[ParamItem]]'=None) ->torch.Tensor:
if self.same_on_batch:
batch_shape = input.size(1), *input.shape[-3:]
patch_num = input.size(0)
else:
batch_shape = input.size(0) * input.size(1), *input.shape[-3:]
if params is None:
params = []
for name, aug in self.get_forward_sequence():
if isinstance(aug, _AugmentationBase):
aug.same_on_batch = False
param = aug.forward_parameters(batch_shape)
if self.same_on_batch:
for k, v in param.items():
if not (k == 'order' and isinstance(aug,
ColorJitter)):
param.update({k: self.
__repeat_param_across_patches__(v,
patch_num)})
aug.same_on_batch = True
else:
param = None
params.append(ParamItem(name, param))
input = super().forward(input.view(-1, *input.shape[-3:]), params)
return input
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
JoanFM/kornia
|
PatchSequential
| false
| 11,576
|
[
"ECL-2.0",
"Apache-2.0"
] | 0
|
808898887cde69074ca3e3df9b24dea9682aad90
|
https://github.com/JoanFM/kornia/tree/808898887cde69074ca3e3df9b24dea9682aad90
|
LinearCombine
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
class LinearCombine(nn.Module):
def __init__(self, layers_num, trainable=True, input_aware=False,
word_level=False):
super(LinearCombine, self).__init__()
self.input_aware = input_aware
self.word_level = word_level
if input_aware:
raise NotImplementedError('Input aware is not supported.')
self.w = nn.Parameter(torch.full((layers_num, 1, 1, 1), 1.0 /
layers_num), requires_grad=trainable)
def forward(self, seq):
nw = F.softmax(self.w, dim=0)
seq = torch.mul(seq, nw)
seq = torch.sum(seq, dim=0)
return seq
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'layers_num': 1}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp10 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp13 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp3 = tmp2 - tmp2
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp4 / tmp4
tmp6 = tmp0 * tmp5
tmp8 = tmp7 * tmp5
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp5
tmp12 = tmp9 + tmp11
tmp14 = tmp13 * tmp5
tmp15 = tmp12 + tmp14
tl.store(out_ptr0 + x0, tmp15, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_mul_sum_0[grid(64)](primals_2, primals_1,
buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
return buf0, primals_1, primals_2
class LinearCombineNew(nn.Module):
def __init__(self, layers_num, trainable=True, input_aware=False,
word_level=False):
super(LinearCombineNew, self).__init__()
self.input_aware = input_aware
self.word_level = word_level
if input_aware:
raise NotImplementedError('Input aware is not supported.')
self.w = nn.Parameter(torch.full((layers_num, 1, 1, 1), 1.0 /
layers_num), requires_grad=trainable)
def forward(self, input_0):
primals_1 = self.w
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
Johnsonms/NNI_master
|
LinearCombine
| false
| 11,577
|
[
"MIT"
] | 0
|
e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
https://github.com/Johnsonms/NNI_master/tree/e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
ResidualConvUnit
|
import torch
import torch.fft
import torch.nn as nn
import torch.utils.cpp_extension
class ResidualConvUnit(nn.Module):
def __init__(self, cin, activation, bn):
super().__init__()
self.conv = nn.Conv2d(cin, cin, kernel_size=3, stride=1, padding=1,
bias=True)
self.skip_add = nn.quantized.FloatFunctional()
def forward(self, x):
return self.skip_add.add(self.conv(x), x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'cin': 4, 'activation': 4, 'bn': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.fft
import torch.nn as nn
import torch.utils.cpp_extension
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_add_convolution_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x3, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_convolution_0[grid(256)](buf1, primals_2,
primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3
class ResidualConvUnitNew(nn.Module):
def __init__(self, cin, activation, bn):
super().__init__()
self.conv = nn.Conv2d(cin, cin, kernel_size=3, stride=1, padding=1,
bias=True)
self.skip_add = nn.quantized.FloatFunctional()
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
CeciLyu/projected_gan
|
ResidualConvUnit
| false
| 11,578
|
[
"MIT"
] | 0
|
5e86ee0c88d47164c30ede37448e7ba7f010fa7b
|
https://github.com/CeciLyu/projected_gan/tree/5e86ee0c88d47164c30ede37448e7ba7f010fa7b
|
Pooling
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
class ReLUConvBN(nn.Module):
"""
Parameters
---
C_in: int
the number of input channels
C_out: int
the number of output channels
stride: int
stride of the convolution
padding: int
zero-padding added to both sides of the input
dilation: int
spacing between kernel elements
bn_affine: bool
If set to ``True``, ``torch.nn.BatchNorm2d`` will have learnable affine parameters. Default: True
bn_momentum: float
the value used for the running_mean and running_var computation. Default: 0.1
bn_track_running_stats: bool
When set to ``True``, ``torch.nn.BatchNorm2d`` tracks the running mean and variance. Default: True
"""
def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation,
bn_affine=True, bn_momentum=0.1, bn_track_running_stats=True):
super(ReLUConvBN, self).__init__()
self.op = nn.Sequential(nn.ReLU(inplace=False), nn.Conv2d(C_in,
C_out, kernel_size, stride=stride, padding=padding, dilation=
dilation, bias=False), nn.BatchNorm2d(C_out, affine=bn_affine,
momentum=bn_momentum, track_running_stats=bn_track_running_stats))
def forward(self, x):
"""
Parameters
---
x: torch.Tensor
input tensor
"""
return self.op(x)
class Pooling(nn.Module):
"""
Parameters
---
C_in: int
the number of input channels
C_out: int
the number of output channels
stride: int
stride of the convolution
bn_affine: bool
If set to ``True``, ``torch.nn.BatchNorm2d`` will have learnable affine parameters. Default: True
bn_momentum: float
the value used for the running_mean and running_var computation. Default: 0.1
bn_track_running_stats: bool
When set to ``True``, ``torch.nn.BatchNorm2d`` tracks the running mean and variance. Default: True
"""
def __init__(self, C_in, C_out, stride, bn_affine=True, bn_momentum=0.1,
bn_track_running_stats=True):
super(Pooling, self).__init__()
if C_in == C_out:
self.preprocess = None
else:
self.preprocess = ReLUConvBN(C_in, C_out, 1, 1, 0, 0, bn_affine,
bn_momentum, bn_track_running_stats)
self.op = nn.AvgPool2d(3, stride=stride, padding=1,
count_include_pad=False)
def forward(self, x):
"""
Parameters
---
x: torch.Tensor
input tensor
"""
if self.preprocess:
x = self.preprocess(x)
return self.op(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'C_in': 4, 'C_out': 4, 'stride': 1}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-5 + x4), tmp10 & xmask, other=0.0)
tmp12 = x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-4 + x4), tmp16 & xmask, other=0.0)
tmp18 = tmp17 + tmp11
tmp19 = 1 + x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-3 + x4), tmp23 & xmask, other=0.0)
tmp25 = tmp24 + tmp18
tmp26 = x1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-1 + x4), tmp30 & xmask, other=0.0)
tmp32 = tmp31 + tmp25
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + x4, tmp33 & xmask, other=0.0)
tmp35 = tmp34 + tmp32
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + x4), tmp36 & xmask, other=0.0)
tmp38 = tmp37 + tmp35
tmp39 = 1 + x1
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3 + x4), tmp43 & xmask, other=0.0)
tmp45 = tmp44 + tmp38
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4 + x4), tmp46 & xmask, other=0.0)
tmp48 = tmp47 + tmp45
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (5 + x4), tmp49 & xmask, other=0.0)
tmp51 = tmp50 + tmp48
tmp52 = (0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0)) * (0 * (0 >= -
1 + x1) + (-1 + x1) * (-1 + x1 > 0)) + (4 * (4 <= 2 + x0) + (2 + x0
) * (2 + x0 < 4)) * (4 * (4 <= 2 + x1) + (2 + x1) * (2 + x1 < 4)
) + -1 * (0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0)) * (4 * (4 <=
2 + x1) + (2 + x1) * (2 + x1 < 4)) + -1 * (0 * (0 >= -1 + x1) + (-1 +
x1) * (-1 + x1 > 0)) * (4 * (4 <= 2 + x0) + (2 + x0) * (2 + x0 < 4))
tmp53 = tmp51 / tmp52
tl.store(out_ptr0 + x4, tmp53, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ReLUConvBN(nn.Module):
"""
Parameters
---
C_in: int
the number of input channels
C_out: int
the number of output channels
stride: int
stride of the convolution
padding: int
zero-padding added to both sides of the input
dilation: int
spacing between kernel elements
bn_affine: bool
If set to ``True``, ``torch.nn.BatchNorm2d`` will have learnable affine parameters. Default: True
bn_momentum: float
the value used for the running_mean and running_var computation. Default: 0.1
bn_track_running_stats: bool
When set to ``True``, ``torch.nn.BatchNorm2d`` tracks the running mean and variance. Default: True
"""
def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation,
bn_affine=True, bn_momentum=0.1, bn_track_running_stats=True):
super(ReLUConvBN, self).__init__()
self.op = nn.Sequential(nn.ReLU(inplace=False), nn.Conv2d(C_in,
C_out, kernel_size, stride=stride, padding=padding, dilation=
dilation, bias=False), nn.BatchNorm2d(C_out, affine=bn_affine,
momentum=bn_momentum, track_running_stats=bn_track_running_stats))
def forward(self, x):
"""
Parameters
---
x: torch.Tensor
input tensor
"""
return self.op(x)
class PoolingNew(nn.Module):
"""
Parameters
---
C_in: int
the number of input channels
C_out: int
the number of output channels
stride: int
stride of the convolution
bn_affine: bool
If set to ``True``, ``torch.nn.BatchNorm2d`` will have learnable affine parameters. Default: True
bn_momentum: float
the value used for the running_mean and running_var computation. Default: 0.1
bn_track_running_stats: bool
When set to ``True``, ``torch.nn.BatchNorm2d`` tracks the running mean and variance. Default: True
"""
def __init__(self, C_in, C_out, stride, bn_affine=True, bn_momentum=0.1,
bn_track_running_stats=True):
super(PoolingNew, self).__init__()
if C_in == C_out:
self.preprocess = None
else:
self.preprocess = ReLUConvBN(C_in, C_out, 1, 1, 0, 0, bn_affine,
bn_momentum, bn_track_running_stats)
self.op = nn.AvgPool2d(3, stride=stride, padding=1,
count_include_pad=False)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Johnsonms/NNI_master
|
Pooling
| false
| 11,579
|
[
"MIT"
] | 0
|
e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
https://github.com/Johnsonms/NNI_master/tree/e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
Interpolate
|
import torch
import torch.fft
import torch.nn as nn
import torch.utils.cpp_extension
class Interpolate(nn.Module):
"""Interpolation module."""
def __init__(self, size, mode='bilinear', align_corners=False):
"""Init.
Args:
size (int or tuple): output spatial size
mode (str): interpolation mode
"""
super(Interpolate, self).__init__()
self.interp = nn.functional.interpolate
self.size = size
self.mode = mode
self.align_corners = align_corners
def forward(self, x):
"""Forward pass.
Args:
x (tensor): input
Returns:
tensor: interpolated data
"""
x = self.interp(x, size=self.size, mode=self.mode, align_corners=
self.align_corners)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'size': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.fft
import torch.nn as nn
import torch.utils.cpp_extension
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(
in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x2 = xindex // 16
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tmp14 = x0
tmp15 = tmp14.to(tl.float32)
tmp16 = tmp15 + tmp2
tmp17 = tmp16 * tmp4
tmp18 = tmp17 - tmp2
tmp19 = triton_helpers.maximum(tmp18, tmp7)
tmp20 = tmp19.to(tl.int32)
tmp21 = tmp20 + tmp10
tmp22 = triton_helpers.minimum(tmp21, tmp12)
tmp23 = tl.load(in_ptr0 + (tmp22 + 4 * tmp13 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (tmp20 + 4 * tmp13 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp25 = tmp23 - tmp24
tmp26 = tmp20.to(tl.float32)
tmp27 = tmp19 - tmp26
tmp28 = triton_helpers.maximum(tmp27, tmp7)
tmp29 = triton_helpers.minimum(tmp28, tmp4)
tmp30 = tmp25 * tmp29
tmp31 = tmp24 + tmp30
tmp32 = tl.load(in_ptr0 + (tmp20 + 4 * tmp9 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (tmp22 + 4 * tmp9 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp34 = tmp33 - tmp32
tmp35 = tmp34 * tmp29
tmp36 = tmp32 + tmp35
tmp37 = tmp31 - tmp36
tmp38 = tmp9.to(tl.float32)
tmp39 = tmp8 - tmp38
tmp40 = triton_helpers.maximum(tmp39, tmp7)
tmp41 = triton_helpers.minimum(tmp40, tmp4)
tmp42 = tmp37 * tmp41
tmp43 = tmp36 + tmp42
tl.store(in_out_ptr0 + x4, tmp43, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = buf0
del buf0
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid
(256)](buf2, arg0_1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf2,
class InterpolateNew(nn.Module):
"""Interpolation module."""
def __init__(self, size, mode='bilinear', align_corners=False):
"""Init.
Args:
size (int or tuple): target output size
mode (str): interpolation mode
align_corners (bool): whether to align corner pixels when interpolating
"""
super(InterpolateNew, self).__init__()
self.interp = nn.functional.interpolate
self.size = size
self.mode = mode
self.align_corners = align_corners
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
CeciLyu/projected_gan
|
Interpolate
| false
| 11,580
|
[
"MIT"
] | 0
|
5e86ee0c88d47164c30ede37448e7ba7f010fa7b
|
https://github.com/CeciLyu/projected_gan/tree/5e86ee0c88d47164c30ede37448e7ba7f010fa7b
|
Mask
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
class Mask(nn.Module):
def forward(self, seq, mask):
seq_mask = torch.unsqueeze(mask, 2)
seq_mask = torch.transpose(seq_mask.repeat(1, 1, seq.size()[1]), 1, 2)
return seq.where(torch.eq(seq_mask, 1), torch.zeros_like(seq))
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
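# A small sketch of the masking behaviour with 2-D inputs, mirroring get_inputs
# above (hypothetical 0/1 mask; assumes the Mask class above is in scope).
# Broadcasting seq against the repeated mask yields a rank-3 result, which is the
# (4, 4, 4) buffer the Triton kernel below materialises.
import torch
m = Mask()
seq = torch.rand(4, 4)
mask = (torch.rand(4, 4) > 0.5).float()
out = m(seq, mask)
print(out.shape)  # torch.Size([4, 4, 4]); entries are zeroed where the mask is not 1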
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_eq_where_zeros_like_0(in_ptr0, in_ptr1, out_ptr0,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y1 = yindex // 4
y0 = yindex % 4
tmp0 = tl.load(in_ptr0 + (x2 + 4 * y1), xmask & ymask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr1 + (x2 + 4 * y0), xmask & ymask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 == tmp1
tmp4 = 0.0
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + (y0 + 4 * x2 + 16 * y1), tmp5, xmask & ymask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_eq_where_zeros_like_0[grid(16, 4)](arg0_1, arg1_1,
buf0, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class MaskNew(nn.Module):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Johnsonms/NNI_master
|
Mask
| false
| 11,581
|
[
"MIT"
] | 0
|
e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
https://github.com/Johnsonms/NNI_master/tree/e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
MLP
|
import torch
import torch.nn as nn
class FC(nn.Module):
def __init__(self, in_size, out_size, dropout_r=0.0, use_relu=True):
super(FC, self).__init__()
self.dropout_r = dropout_r
self.use_relu = use_relu
self.linear = nn.Linear(in_size, out_size)
if use_relu:
self.relu = nn.ReLU(inplace=True)
if dropout_r > 0:
self.dropout = nn.Dropout(dropout_r)
def forward(self, x):
x = self.linear(x)
if self.use_relu:
x = self.relu(x)
if self.dropout_r > 0:
x = self.dropout(x)
return x
class MLP(nn.Module):
def __init__(self, in_size, mid_size, out_size, dropout_r=0.0, use_relu
=True):
super(MLP, self).__init__()
self.fc = FC(in_size, mid_size, dropout_r=dropout_r, use_relu=use_relu)
self.linear = nn.Linear(mid_size, out_size)
def forward(self, x):
return self.linear(self.fc(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_size': 4, 'mid_size': 4, 'out_size': 4}]
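# A minimal usage sketch (shapes chosen arbitrarily; assumes the MLP class above
# is in scope): FC applies Linear -> ReLU (-> optional Dropout) over the last
# dimension, and MLP stacks a second Linear on top of it.
import torch
mlp = MLP(in_size=4, mid_size=8, out_size=2, dropout_r=0.1)
y = mlp(torch.rand(4, 4, 4, 4))
print(y.shape)  # torch.Size([4, 4, 4, 2])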
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + x4, tmp6, xmask)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * (x1 % 4 // 4) + 64 * ((4 *
(x1 // 4 % 4) + x1 % 4) // 16)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_view_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0)
del buf1
extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf2, primals_4, buf4
class FC(nn.Module):
def __init__(self, in_size, out_size, dropout_r=0.0, use_relu=True):
super(FC, self).__init__()
self.dropout_r = dropout_r
self.use_relu = use_relu
self.linear = nn.Linear(in_size, out_size)
if use_relu:
self.relu = nn.ReLU(inplace=True)
if dropout_r > 0:
self.dropout = nn.Dropout(dropout_r)
def forward(self, x):
x = self.linear(x)
if self.use_relu:
x = self.relu(x)
if self.dropout_r > 0:
x = self.dropout(x)
return x
class MLPNew(nn.Module):
def __init__(self, in_size, mid_size, out_size, dropout_r=0.0, use_relu
=True):
super(MLPNew, self).__init__()
self.fc = FC(in_size, mid_size, dropout_r=dropout_r, use_relu=use_relu)
self.linear = nn.Linear(mid_size, out_size)
def forward(self, input_0):
primals_1 = self.fc.linear.weight
primals_2 = self.fc.linear.bias
primals_4 = self.linear.weight
primals_5 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
JoonseoKang/mcan-cap
|
MLP
| false
| 11,582
|
[
"Apache-2.0"
] | 0
|
788e21fc1bc712018166aa44cc3298264f493f3b
|
https://github.com/JoonseoKang/mcan-cap/tree/788e21fc1bc712018166aa44cc3298264f493f3b
|
InformedSender
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.utils.data
import torch.distributions
class InformedSender(nn.Module):
def __init__(self, game_size, feat_size, embedding_size, hidden_size,
vocab_size=100, temp=1.0):
super(InformedSender, self).__init__()
self.game_size = game_size
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.temp = temp
self.lin1 = nn.Linear(feat_size, embedding_size, bias=False)
self.conv2 = nn.Conv2d(1, hidden_size, kernel_size=(game_size, 1),
stride=(game_size, 1), bias=False)
self.conv3 = nn.Conv2d(1, 1, kernel_size=(hidden_size, 1), stride=(
hidden_size, 1), bias=False)
self.lin4 = nn.Linear(embedding_size, vocab_size, bias=False)
def forward(self, x, return_embeddings=False):
emb = self.return_embeddings(x)
h = self.conv2(emb)
h = torch.sigmoid(h)
h = h.transpose(1, 2)
h = self.conv3(h)
h = torch.sigmoid(h)
h = h.squeeze(dim=1)
h = h.squeeze(dim=1)
h = self.lin4(h)
h = h.mul(1.0 / self.temp)
logits = F.log_softmax(h, dim=1)
return logits
def return_embeddings(self, x):
embs = []
for i in range(self.game_size):
h = x[i]
if len(h.size()) == 3:
h = h.squeeze(dim=-1)
h_i = self.lin1(h)
h_i = h_i.unsqueeze(dim=1)
h_i = h_i.unsqueeze(dim=1)
embs.append(h_i)
h = torch.cat(embs, dim=2)
return h
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'game_size': 4, 'feat_size': 4, 'embedding_size': 4,
'hidden_size': 4}]
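# A usage sketch mirroring get_inputs/get_init_inputs above (assumes the
# InformedSender class above is in scope): x[i] is the i-th of game_size candidate
# feature tensors, and the output is log-probabilities over the vocabulary.
import torch
sender = InformedSender(game_size=4, feat_size=4, embedding_size=4, hidden_size=4)
logits = sender(torch.rand(4, 4, 4))
print(logits.shape)  # torch.Size([4, 100])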
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import torch.distributions
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 4 * x2), tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x0 + 4 * x2), tmp14 & xmask, eviction_policy
='evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tl.full([1], 4, tl.int64)
tmp19 = tl.load(in_ptr3 + (x0 + 4 * x2), tmp16 & xmask, eviction_policy
='evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tl.store(out_ptr0 + x3, tmp22, xmask)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_per_fused__log_softmax_3(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 100
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 100 * x0), rmask & xmask, other=0.0)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(rmask & xmask, tmp3, float('-inf'))
tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
tmp7 = tmp2 - tmp6
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.where(rmask & xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tmp14 = tl_math.log(tmp13)
tmp15 = tmp8 - tmp14
tl.store(out_ptr2 + (r1 + 100 * x0), tmp15, rmask & xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 1, 4, 1), (4, 4, 1, 1))
assert_size_stride(primals_4, (1, 1, 4, 1), (4, 4, 1, 1))
assert_size_stride(primals_5, (100, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 16),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 32),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 48),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf3)
del primals_2
buf4 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(64)](buf0, buf1, buf2, buf3, buf4, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf0
del buf1
del buf2
del buf3
buf5 = extern_kernels.convolution(buf4, primals_3, stride=(4, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 1, 4), (16, 4, 4, 1))
buf6 = buf5
del buf5
triton_poi_fused_sigmoid_1[grid(64)](buf6, 64, XBLOCK=64, num_warps
=1, num_stages=1)
buf7 = extern_kernels.convolution(reinterpret_tensor(buf6, (4, 1, 4,
4), (16, 4, 4, 1), 0), primals_4, stride=(4, 1), padding=(0, 0),
dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf7, (4, 1, 1, 4), (4, 4, 4, 1))
buf8 = buf7
del buf7
triton_poi_fused_sigmoid_2[grid(16)](buf8, 16, XBLOCK=16, num_warps
=1, num_stages=1)
buf9 = empty_strided_cuda((4, 100), (100, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf8, (4, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 100), (1, 4), 0), out=buf9)
buf12 = empty_strided_cuda((4, 100), (100, 1), torch.float32)
triton_per_fused__log_softmax_3[grid(4)](buf9, buf12, 4, 100,
XBLOCK=1, num_warps=2, num_stages=1)
del buf9
return buf12, primals_3, primals_4, reinterpret_tensor(primals_1, (4, 4
), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (4, 1), 16
), reinterpret_tensor(primals_1, (4, 4), (4, 1), 32
), reinterpret_tensor(primals_1, (4, 4), (4, 1), 48
), buf4, buf6, buf8, buf12, primals_5
class InformedSenderNew(nn.Module):
def __init__(self, game_size, feat_size, embedding_size, hidden_size,
vocab_size=100, temp=1.0):
super(InformedSenderNew, self).__init__()
self.game_size = game_size
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.temp = temp
self.lin1 = nn.Linear(feat_size, embedding_size, bias=False)
self.conv2 = nn.Conv2d(1, hidden_size, kernel_size=(game_size, 1),
stride=(game_size, 1), bias=False)
self.conv3 = nn.Conv2d(1, 1, kernel_size=(hidden_size, 1), stride=(
hidden_size, 1), bias=False)
self.lin4 = nn.Linear(embedding_size, vocab_size, bias=False)
def return_embeddings(self, x):
embs = []
for i in range(self.game_size):
h = x[i]
if len(h.size()) == 3:
h = h.squeeze(dim=-1)
h_i = self.lin1(h)
h_i = h_i.unsqueeze(dim=1)
h_i = h_i.unsqueeze(dim=1)
embs.append(h_i)
h = torch.cat(embs, dim=2)
return h
def forward(self, input_0):
primals_2 = self.lin1.weight
primals_3 = self.conv2.weight
primals_4 = self.conv3.weight
primals_5 = self.lin4.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
IA3005/NLP_ens
|
InformedSender
| false
| 11,583
|
[
"MIT"
] | 0
|
794ebbff46d5e6d5476f29b577b40bbb52991246
|
https://github.com/IA3005/NLP_ens/tree/794ebbff46d5e6d5476f29b577b40bbb52991246
|
BinaryExpSquare
|
import abc
import inspect
import torch
import warnings
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import Any
from typing import *
def get_module_name(cls_or_func):
module_name = cls_or_func.__module__
if module_name == '__main__':
for frm in inspect.stack():
if inspect.getmodule(frm[0]).__name__ == '__main__':
main_file_path = Path(inspect.getsourcefile(frm[0]))
if not Path().samefile(main_file_path.parent):
raise RuntimeError(
f'You are using "{main_file_path}" to launch your experiment, please launch the experiment under the directory where "{main_file_path.name}" is located.'
)
module_name = main_file_path.stem
break
if module_name == '__main__':
warnings.warn(
'Callstack exhausted but main module still not found. This will probably cause issues that the function/class cannot be imported.'
)
if (f'{cls_or_func.__module__}.{cls_or_func.__name__}' ==
'torch.nn.modules.rnn.LSTM'):
module_name = cls_or_func.__module__
return module_name
def reset_uid(namespace: 'str'='default') ->None:
_last_uid[namespace] = 0
def _create_wrapper_cls(cls, store_init_parameters=True, reset_mutation_uid
=False, stop_parsing=True):
class wrapper(cls):
def __init__(self, *args, **kwargs):
self._stop_parsing = stop_parsing
if reset_mutation_uid:
reset_uid('mutation')
if store_init_parameters:
argname_list = list(inspect.signature(cls.__init__).
parameters.keys())[1:]
full_args = {}
full_args.update(kwargs)
assert len(args) <= len(argname_list
), f'Length of {args} is greater than length of {argname_list}.'
for argname, value in zip(argname_list, args):
full_args[argname] = value
args = list(args)
for i, value in enumerate(args):
if isinstance(value, Translatable):
args[i] = value._translate()
for i, value in kwargs.items():
if isinstance(value, Translatable):
kwargs[i] = value._translate()
self._init_parameters = full_args
else:
self._init_parameters = {}
super().__init__(*args, **kwargs)
wrapper.__module__ = get_module_name(cls)
wrapper.__name__ = cls.__name__
wrapper.__qualname__ = cls.__qualname__
wrapper.__init__.__doc__ = cls.__init__.__doc__
return wrapper
def serialize_cls(cls):
"""
To create an serializable class.
"""
return _create_wrapper_cls(cls)
def basic_unit(cls):
"""
To wrap a module as a basic unit, stopping it from being parsed further and making it mutable.
"""
import torch.nn as nn
assert issubclass(cls, nn.Module
), 'When using @basic_unit, the class must be a subclass of nn.Module.'
return serialize_cls(cls)
class Translatable(abc.ABC):
"""
Inherit this class and implement ``_translate`` when the inner class needs an
init parameter that differs from the one passed to the wrapper class.
"""
@abc.abstractmethod
def _translate(self) ->Any:
pass
@basic_unit
class BinaryExpSquare(nn.Module):
def __init__(self):
super().__init__()
self.beta = torch.nn.Parameter(torch.tensor(1, dtype=torch.float32))
def forward(self, x):
return torch.exp(-self.beta * torch.square(x[0] - x[1]))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
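# The wrapped module computes exp(-beta * (x[0] - x[1])**2), a Gaussian-style
# similarity between the first two slices of its input. A standalone sketch of
# that formula in plain PyTorch (hypothetical tensors, no @basic_unit wrapper):
import torch
beta = torch.tensor(1.0)
x = torch.rand(2, 4, 4, 4)  # x[0] and x[1] are the two operands
out = torch.exp(-beta * torch.square(x[0] - x[1]))
print(out.shape)  # torch.Size([4, 4, 4])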
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import abc
import inspect
import warnings
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import Any
from typing import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_exp_mul_neg_pow_sub_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = -tmp5
tmp7 = tmp6 * tmp3
tmp8 = tl_math.exp(tmp7)
tl.store(out_ptr0 + x0, tmp3, xmask)
tl.store(out_ptr1 + x0, tmp8, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_exp_mul_neg_pow_sub_0[grid(64)](primals_2,
primals_1, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_1
del primals_2
return buf1, buf0, buf1
def get_module_name(cls_or_func):
module_name = cls_or_func.__module__
if module_name == '__main__':
for frm in inspect.stack():
if inspect.getmodule(frm[0]).__name__ == '__main__':
main_file_path = Path(inspect.getsourcefile(frm[0]))
if not Path().samefile(main_file_path.parent):
raise RuntimeError(
f'You are using "{main_file_path}" to launch your experiment, please launch the experiment under the directory where "{main_file_path.name}" is located.'
)
module_name = main_file_path.stem
break
if module_name == '__main__':
warnings.warn(
'Callstack exhausted but main module still not found. This will probably cause issues that the function/class cannot be imported.'
)
if (f'{cls_or_func.__module__}.{cls_or_func.__name__}' ==
'torch.nn.modules.rnn.LSTM'):
module_name = cls_or_func.__module__
return module_name
def reset_uid(namespace: 'str'='default') ->None:
_last_uid[namespace] = 0
def _create_wrapper_cls(cls, store_init_parameters=True, reset_mutation_uid
=False, stop_parsing=True):
class wrapper(cls):
def __init__(self, *args, **kwargs):
self._stop_parsing = stop_parsing
if reset_mutation_uid:
reset_uid('mutation')
if store_init_parameters:
argname_list = list(inspect.signature(cls.__init__).
parameters.keys())[1:]
full_args = {}
full_args.update(kwargs)
assert len(args) <= len(argname_list
), f'Length of {args} is greater than length of {argname_list}.'
for argname, value in zip(argname_list, args):
full_args[argname] = value
args = list(args)
for i, value in enumerate(args):
if isinstance(value, Translatable):
args[i] = value._translate()
for i, value in kwargs.items():
if isinstance(value, Translatable):
kwargs[i] = value._translate()
self._init_parameters = full_args
else:
self._init_parameters = {}
super().__init__(*args, **kwargs)
wrapper.__module__ = get_module_name(cls)
wrapper.__name__ = cls.__name__
wrapper.__qualname__ = cls.__qualname__
wrapper.__init__.__doc__ = cls.__init__.__doc__
return wrapper
def serialize_cls(cls):
"""
To create a serializable class.
"""
return _create_wrapper_cls(cls)
def basic_unit(cls):
"""
To wrap a module as a basic unit, stopping it from being parsed further and making it mutable.
"""
import torch.nn as nn
assert issubclass(cls, nn.Module
), 'When using @basic_unit, the class must be a subclass of nn.Module.'
return serialize_cls(cls)
class Translatable(abc.ABC):
"""
Inherit this class and implement ``_translate`` when the inner class needs an
init parameter that differs from the one passed to the wrapper class.
"""
@abc.abstractmethod
def _translate(self) ->Any:
pass
@basic_unit
class BinaryExpSquareNew(nn.Module):
def __init__(self):
super().__init__()
self.beta = torch.nn.Parameter(torch.tensor(1, dtype=torch.float32))
def forward(self, input_0):
primals_1 = self.beta
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
Johnsonms/NNI_master
|
BinaryExpSquare
| false
| 11,584
|
[
"MIT"
] | 0
|
e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
https://github.com/Johnsonms/NNI_master/tree/e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
Hsigmoid
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
class Hsigmoid(nn.Module):
"""Hsigmoid activation function."""
def __init__(self, inplace=True):
super(Hsigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return F.relu6(x + 3.0, inplace=self.inplace) / 6.0
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
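# Hsigmoid computes relu6(x + 3) / 6, a piecewise-linear approximation of the
# sigmoid; the Triton kernel below bakes the 1/6 in as 0.16666... A standalone check:
import torch
import torch.nn.functional as F
x = torch.linspace(-5.0, 5.0, steps=11)
hsig = F.relu6(x + 3.0) / 6.0
ref = torch.clamp((x + 3.0) / 6.0, 0.0, 1.0)  # equivalent closed form
print(torch.allclose(hsig, ref))  # True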
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_hardtanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 0.16666666666666666
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class HsigmoidNew(nn.Module):
"""Hsigmoid activation function."""
def __init__(self, inplace=True):
super(HsigmoidNew, self).__init__()
self.inplace = inplace
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Johnsonms/NNI_master
|
Hsigmoid
| false
| 11,585
|
[
"MIT"
] | 0
|
e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
https://github.com/Johnsonms/NNI_master/tree/e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
SymmSoftplus
|
import torch
from torch.utils.data import Dataset as Dataset
import torch.utils.data
def symm_softplus(x, softplus_=torch.nn.functional.softplus):
return softplus_(x) - 0.5 * x
class SymmSoftplus(torch.nn.Module):
def forward(self, x):
return symm_softplus(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
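# symm_softplus(x) = softplus(x) - 0.5 * x is an even function, because
# softplus(x) - softplus(-x) == x. A quick numerical check of that symmetry:
import torch
import torch.nn.functional as F
x = torch.randn(1000)
f = lambda t: F.softplus(t) - 0.5 * t
print(torch.allclose(f(x), f(-x), atol=1e-6))  # True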
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.utils.data import Dataset as Dataset
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_softplus_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 20.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = libdevice.log1p(tmp3)
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp6 = 0.5
tmp7 = tmp0 * tmp6
tmp8 = tmp5 - tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_softplus_sub_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
def symm_softplus(x, softplus_=torch.nn.functional.softplus):
return softplus_(x) - 0.5 * x
class SymmSoftplusNew(torch.nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
JunLi-Galios/CP-Flow
|
SymmSoftplus
| false
| 11,586
|
[
"MIT"
] | 0
|
69272636c8c644ce3c96bbc4d610591756b8e3ff
|
https://github.com/JunLi-Galios/CP-Flow/tree/69272636c8c644ce3c96bbc4d610591756b8e3ff
|
InteractiveKLLoss
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
class InteractiveKLLoss(nn.Module):
def __init__(self, temperature):
super().__init__()
self.temperature = temperature
self.kl_loss = nn.KLDivLoss()
def forward(self, student, teacher):
return self.kl_loss(F.log_softmax(student / self.temperature, dim=1
), F.softmax(teacher / self.temperature, dim=1))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'temperature': 4}]
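# A minimal distillation-style usage sketch (hypothetical logit shapes; the test
# inputs above use 4-D tensors instead). Student and teacher logits are softened
# by the same temperature before the KL divergence is taken.
import torch
loss_fn = InteractiveKLLoss(temperature=4)  # assumes the class above is in scope
student = torch.randn(8, 10)
teacher = torch.randn(8, 10)
print(loss_fn(student, teacher).shape)  # torch.Size([]) - a scalar loss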
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x3, tmp17, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tl.store(out_ptr0 + x3, tmp16, xmask)
@triton.jit
def triton_per_fused__log_softmax__softmax_mean_mul_sub_xlogy_2(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr1 + r3, None)
tmp18 = tl.load(in_ptr1 + (r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr1 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr1 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp26 = tl.load(in_ptr1 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = libdevice.isnan(tmp8).to(tl.int1)
tmp10 = 0.0
tmp11 = tmp8 == tmp10
tmp12 = tl_math.log(tmp8)
tmp13 = tmp8 * tmp12
tmp14 = tl.where(tmp11, tmp10, tmp13)
tmp15 = float('nan')
tmp16 = tl.where(tmp9, tmp15, tmp14)
tmp19 = tl_math.exp(tmp18)
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tl_math.log(tmp28)
tmp30 = tmp17 - tmp29
tmp31 = tmp8 * tmp30
tmp32 = tmp16 - tmp31
tmp33 = tl.broadcast_to(tmp32, [RBLOCK])
tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0))
tmp36 = 256.0
tmp37 = tmp35 / tmp36
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp37, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_1[grid(256)](arg0_1, buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_per_fused__log_softmax__softmax_mean_mul_sub_xlogy_2[grid(1)](
buf4, buf0, buf2, 1, 256, num_warps=2, num_stages=1)
del buf0
del buf2
return buf4,
class InteractiveKLLossNew(nn.Module):
def __init__(self, temperature):
super().__init__()
self.temperature = temperature
self.kl_loss = nn.KLDivLoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Johnsonms/NNI_master
|
InteractiveKLLoss
| false
| 11,587
|
[
"MIT"
] | 0
|
e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
https://github.com/Johnsonms/NNI_master/tree/e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
GlobalAvgPool1d
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from abc import abstractmethod
from torch.nn import functional
from typing import *
class AvgPool(nn.Module):
"""
AvgPool Module.
"""
def __init__(self):
super().__init__()
@abstractmethod
def forward(self, input_tensor):
pass
class GlobalAvgPool1d(AvgPool):
"""
GlobalAvgPool1d Module.
"""
def forward(self, input_tensor):
return functional.avg_pool1d(input_tensor, input_tensor.size()[2:]
).view(input_tensor.size()[:2])
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {}]
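# A usage sketch (assumes the GlobalAvgPool1d class above is in scope): pooling
# over the full length axis is just a mean over dim 2, and the (N, C, 1) result
# is then viewed back to (N, C).
import torch
pool = GlobalAvgPool1d()
x = torch.rand(4, 4, 4)  # (batch, channels, length)
y = pool(x)
print(y.shape, torch.allclose(y, x.mean(dim=2)))  # torch.Size([4, 4]) True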
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from abc import abstractmethod
from typing import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 4), (4, 1), 0),
class AvgPool(nn.Module):
"""
AvgPool Module.
"""
def __init__(self):
super().__init__()
@abstractmethod
def forward(self, input_tensor):
pass
class GlobalAvgPool1dNew(AvgPool):
"""
GlobalAvgPool1d Module.
"""
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Johnsonms/NNI_master
|
GlobalAvgPool1d
| false
| 11,588
|
[
"MIT"
] | 0
|
e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
https://github.com/Johnsonms/NNI_master/tree/e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
BinaryAdd
|
import abc
import inspect
import torch
import warnings
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import Any
from typing import *
def get_module_name(cls_or_func):
module_name = cls_or_func.__module__
if module_name == '__main__':
for frm in inspect.stack():
if inspect.getmodule(frm[0]).__name__ == '__main__':
main_file_path = Path(inspect.getsourcefile(frm[0]))
if not Path().samefile(main_file_path.parent):
raise RuntimeError(
f'You are using "{main_file_path}" to launch your experiment, please launch the experiment under the directory where "{main_file_path.name}" is located.'
)
module_name = main_file_path.stem
break
if module_name == '__main__':
warnings.warn(
'Callstack exhausted but main module still not found. This will probably cause issues that the function/class cannot be imported.'
)
if (f'{cls_or_func.__module__}.{cls_or_func.__name__}' ==
'torch.nn.modules.rnn.LSTM'):
module_name = cls_or_func.__module__
return module_name
def reset_uid(namespace: 'str'='default') ->None:
_last_uid[namespace] = 0
def _create_wrapper_cls(cls, store_init_parameters=True, reset_mutation_uid
=False, stop_parsing=True):
class wrapper(cls):
def __init__(self, *args, **kwargs):
self._stop_parsing = stop_parsing
if reset_mutation_uid:
reset_uid('mutation')
if store_init_parameters:
argname_list = list(inspect.signature(cls.__init__).
parameters.keys())[1:]
full_args = {}
full_args.update(kwargs)
assert len(args) <= len(argname_list
), f'Length of {args} is greater than length of {argname_list}.'
for argname, value in zip(argname_list, args):
full_args[argname] = value
args = list(args)
for i, value in enumerate(args):
if isinstance(value, Translatable):
args[i] = value._translate()
for i, value in kwargs.items():
if isinstance(value, Translatable):
kwargs[i] = value._translate()
self._init_parameters = full_args
else:
self._init_parameters = {}
super().__init__(*args, **kwargs)
wrapper.__module__ = get_module_name(cls)
wrapper.__name__ = cls.__name__
wrapper.__qualname__ = cls.__qualname__
wrapper.__init__.__doc__ = cls.__init__.__doc__
return wrapper
def serialize_cls(cls):
"""
To create a serializable class.
"""
return _create_wrapper_cls(cls)
def basic_unit(cls):
"""
To wrap a module as a basic unit, stopping it from being parsed further and making it mutable.
"""
import torch.nn as nn
assert issubclass(cls, nn.Module
), 'When using @basic_unit, the class must be a subclass of nn.Module.'
return serialize_cls(cls)
class Translatable(abc.ABC):
"""
Inherit this class and implement ``_translate`` when the inner class needs an
init parameter that differs from the one passed to the wrapper class.
"""
@abc.abstractmethod
def _translate(self) ->Any:
pass
@basic_unit
class BinaryAdd(nn.Module):
def forward(self, x):
return x[0] + x[1]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import abc
import inspect
import warnings
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import Any
from typing import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
def get_module_name(cls_or_func):
module_name = cls_or_func.__module__
if module_name == '__main__':
for frm in inspect.stack():
if inspect.getmodule(frm[0]).__name__ == '__main__':
main_file_path = Path(inspect.getsourcefile(frm[0]))
if not Path().samefile(main_file_path.parent):
raise RuntimeError(
f'You are using "{main_file_path}" to launch your experiment, please launch the experiment under the directory where "{main_file_path.name}" is located.'
)
module_name = main_file_path.stem
break
if module_name == '__main__':
warnings.warn(
'Callstack exhausted but main module still not found. This will probably cause issues that the function/class cannot be imported.'
)
if (f'{cls_or_func.__module__}.{cls_or_func.__name__}' ==
'torch.nn.modules.rnn.LSTM'):
module_name = cls_or_func.__module__
return module_name
def reset_uid(namespace: 'str'='default') ->None:
_last_uid[namespace] = 0
def _create_wrapper_cls(cls, store_init_parameters=True, reset_mutation_uid
=False, stop_parsing=True):
class wrapper(cls):
def __init__(self, *args, **kwargs):
self._stop_parsing = stop_parsing
if reset_mutation_uid:
reset_uid('mutation')
if store_init_parameters:
argname_list = list(inspect.signature(cls.__init__).
parameters.keys())[1:]
full_args = {}
full_args.update(kwargs)
assert len(args) <= len(argname_list
), f'Length of {args} is greater than length of {argname_list}.'
for argname, value in zip(argname_list, args):
full_args[argname] = value
args = list(args)
for i, value in enumerate(args):
if isinstance(value, Translatable):
args[i] = value._translate()
for i, value in kwargs.items():
if isinstance(value, Translatable):
kwargs[i] = value._translate()
self._init_parameters = full_args
else:
self._init_parameters = {}
super().__init__(*args, **kwargs)
wrapper.__module__ = get_module_name(cls)
wrapper.__name__ = cls.__name__
wrapper.__qualname__ = cls.__qualname__
wrapper.__init__.__doc__ = cls.__init__.__doc__
return wrapper
def serialize_cls(cls):
"""
To create a serializable class.
"""
return _create_wrapper_cls(cls)
def basic_unit(cls):
"""
To wrap a module as a basic unit, stopping it from being parsed further and making it mutable.
"""
import torch.nn as nn
assert issubclass(cls, nn.Module
), 'When using @basic_unit, the class must be a subclass of nn.Module.'
return serialize_cls(cls)
class Translatable(abc.ABC):
"""
Inherit this class and implement ``_translate`` when the inner class needs an
init parameter that differs from the one passed to the wrapper class.
"""
@abc.abstractmethod
def _translate(self) ->Any:
pass
@basic_unit
class BinaryAddNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Johnsonms/NNI_master
|
BinaryAdd
| false
| 11,589
|
[
"MIT"
] | 0
|
e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
https://github.com/Johnsonms/NNI_master/tree/e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
BackboneModel1
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
class BackboneModel1(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 1, 1, 1)
def forward(self, x):
return self.conv1(x)
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16384)](buf1, primals_2, 16384,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3
class BackboneModel1New(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 1, 1, 1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Johnsonms/NNI_master
|
BackboneModel1
| false
| 11,590
|
[
"MIT"
] | 0
|
e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
https://github.com/Johnsonms/NNI_master/tree/e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
MultiHeadAttention
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
def attention(query, key, value, mask=None, dropout=None):
d_k = query.size(-1)
logits = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
logits = logits.masked_fill(mask == 0, -1000000000.0)
attention_map = F.softmax(logits, dim=-1)
if dropout is not None:
attention_map = dropout(attention_map)
return torch.matmul(attention_map, value)
class MultiHeadAttention(nn.Module):
def __init__(self, hidden_dim, n_heads, dropout=0.1):
super().__init__()
self.hidden_dim = hidden_dim
self.head_dim = hidden_dim // n_heads
self.n_heads = n_heads
self.q_proj = nn.Linear(hidden_dim, hidden_dim)
self.v_proj = nn.Linear(hidden_dim, hidden_dim)
self.k_proj = nn.Linear(hidden_dim, hidden_dim)
self.dropout = nn.Dropout(dropout)
self.output_proj = nn.Linear(hidden_dim, hidden_dim)
def forward(self, query, key, value, mask=None):
batch_size = query.size(0)
k_project = self.k_proj(key)
q_project = self.q_proj(query)
v_project = self.v_proj(value)
k_reshape = k_project.view(batch_size, -1, self.n_heads, self.head_dim
).transpose(1, 2)
q_reshape = q_project.view(batch_size, -1, self.n_heads, self.head_dim
).transpose(1, 2)
v_reshape = v_project.view(batch_size, -1, self.n_heads, self.head_dim
).transpose(1, 2)
scores = attention(q_reshape, k_reshape, v_reshape, mask, self.dropout)
scores = scores.transpose(1, 2).contiguous()
scores = scores.view(batch_size, -1, self.hidden_dim)
return self.output_proj(scores)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_dim': 4, 'n_heads': 4}]
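# A usage sketch with hypothetical shapes (assumes the MultiHeadAttention class
# above is in scope): hidden_dim is split across n_heads, scaled dot-product
# attention runs per head, and the heads are concatenated and projected back.
import torch
mha = MultiHeadAttention(hidden_dim=16, n_heads=4, dropout=0.0)
q = torch.rand(2, 5, 16)   # (batch, query_len, hidden_dim)
k = torch.rand(2, 7, 16)   # (batch, key_len, hidden_dim)
v = torch.rand(2, 7, 16)
print(mha(q, k, v).shape)  # torch.Size([2, 5, 16])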
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 16 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_per_fused_1(in_ptr0, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr
):
xnumel = 256
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = float('-inf')
tmp12 = tmp0 == tmp11
tmp13 = tmp12 == 0
tmp14 = tmp13.to(tl.int64)
tmp15 = tmp14 != 0
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(xmask, tmp16, 0)
tmp19 = triton_helpers.any(tmp18, 1)[:, None]
tmp20 = tmp19 == 0
tmp21 = tmp6 / tmp10
tmp22 = 0.0
tmp23 = tl.where(tmp20, tmp22, tmp21)
tl.store(out_ptr3 + (r1 + 16 * x0), tmp23, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1)
del primals_5
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 16)](buf1, primals_6, buf3, 16, 16,
XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_6
buf4 = reinterpret_tensor(buf1, (4, 4, 1, 16), (64, 16, 16, 1), 0)
del buf1
triton_poi_fused_0[grid(16, 16)](buf0, primals_3, buf4, 16, 16,
XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf5 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 16), (16, 0, 1), 0), out=buf5)
buf9 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch
.float32)
triton_per_fused_1[grid(256)](buf5, buf9, 256, 16, XBLOCK=32,
num_warps=4, num_stages=1)
del buf5
buf10 = reinterpret_tensor(buf0, (4, 4, 16, 1), (64, 16, 1, 1), 0)
del buf0
triton_poi_fused_2[grid(16, 16)](buf2, primals_8, buf10, 16, 16,
XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_8
buf11 = reinterpret_tensor(buf2, (16, 16, 1), (16, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 16, 16), (256, 16,
1), 0), reinterpret_tensor(buf10, (16, 16, 1), (16, 1, 0), 0),
out=buf11)
buf12 = empty_strided_cuda((4, 16, 4, 1), (64, 4, 1, 1), torch.float32)
triton_poi_fused_clone_3[grid(64, 4)](buf11, buf12, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf13 = reinterpret_tensor(buf11, (64, 4), (4, 1), 0)
del buf11
extern_kernels.addmm(primals_11, reinterpret_tensor(buf12, (64, 4),
(4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf13)
del primals_11
return reinterpret_tensor(buf13, (4, 16, 4), (64, 4, 1), 0
), reinterpret_tensor(primals_4, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0
), buf9, reinterpret_tensor(buf10, (16, 1, 16), (16, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 16), (16, 1, 1), 0
), reinterpret_tensor(buf4, (16, 16, 1), (16, 1, 16), 0
), reinterpret_tensor(buf12, (64, 4), (4, 1), 0), primals_10
def attention(query, key, value, mask=None, dropout=None):
d_k = query.size(-1)
logits = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
logits = logits.masked_fill(mask == 0, -1000000000.0)
attention_map = F.softmax(logits, dim=-1)
if dropout is not None:
attention_map = dropout(attention_map)
return torch.matmul(attention_map, value)
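# Hedged usage sketch for the attention() helper above (shapes are illustrative, not
# taken from the original repo; it relies on this file's module-level imports of
# torch, F and math): positions where mask == 0 get a large negative logit before the
# softmax, so they receive ~zero attention weight.
_q = torch.rand(2, 3, 5, 8)      # (batch, heads, seq_len, d_k)
_k = torch.rand(2, 3, 5, 8)
_v = torch.rand(2, 3, 5, 8)
_mask = torch.ones(2, 1, 5, 5)
_mask[..., -1] = 0               # forbid attending to the last position
assert attention(_q, _k, _v, mask=_mask).shape == (2, 3, 5, 8)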
class MultiHeadAttentionNew(nn.Module):
def __init__(self, hidden_dim, n_heads, dropout=0.1):
super().__init__()
self.hidden_dim = hidden_dim
self.head_dim = hidden_dim // n_heads
self.n_heads = n_heads
self.q_proj = nn.Linear(hidden_dim, hidden_dim)
self.v_proj = nn.Linear(hidden_dim, hidden_dim)
self.k_proj = nn.Linear(hidden_dim, hidden_dim)
self.dropout = nn.Dropout(dropout)
self.output_proj = nn.Linear(hidden_dim, hidden_dim)
def forward(self, input_0, input_1, input_2):
primals_2 = self.q_proj.weight
primals_3 = self.q_proj.bias
primals_5 = self.v_proj.weight
primals_6 = self.v_proj.bias
primals_7 = self.k_proj.weight
primals_8 = self.k_proj.bias
primals_10 = self.output_proj.weight
primals_11 = self.output_proj.bias
primals_1 = input_0
primals_4 = input_1
primals_9 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
Johnsonms/NNI_master
|
MultiHeadAttention
| false
| 11,591
|
[
"MIT"
] | 0
|
e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
https://github.com/Johnsonms/NNI_master/tree/e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
PosLinear2
|
import torch
from torch import Tensor
from torch.utils.data import Dataset as Dataset
import torch.nn as nn
import torch.utils.data
class PosLinear2(torch.nn.Linear):
def forward(self, x: 'Tensor') ->Tensor:
return nn.functional.linear(x, torch.nn.functional.softmax(self.
weight, 1), self.bias)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.utils.data import Dataset as Dataset
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
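# Sketch (not from the original repo): together the two kernels above compute a
# numerically stable row-wise softmax of the 4x4 weight -- pass one subtracts the row
# max and exponentiates, pass two divides by the row sum.  Eager reference:
import torch
_w = torch.rand(4, 4)
_shifted = (_w - _w.max(dim=1, keepdim=True).values).exp()     # kernel 0
_probs = _shifted / _shifted.sum(dim=1, keepdim=True)          # kernel 1
assert torch.allclose(_probs, torch.softmax(_w, dim=1))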
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(16)](primals_1, buf0, 16, XBLOCK=
16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(16)](buf0, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf0
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del buf1
del primals_2
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)
class PosLinear2New(torch.nn.Linear):
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
JunLi-Galios/CP-Flow
|
PosLinear2
| false
| 11,592
|
[
"MIT"
] | 0
|
69272636c8c644ce3c96bbc4d610591756b8e3ff
|
https://github.com/JunLi-Galios/CP-Flow/tree/69272636c8c644ce3c96bbc4d610591756b8e3ff
|
ActorCritic
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
class ActorCritic(nn.Module):
def __init__(self, num_states, num_actions, hidden_size):
super(ActorCritic, self).__init__()
self.num_actions = num_actions
self.fc = nn.Linear(num_states, hidden_size)
self.critic_linear2 = nn.Linear(hidden_size, 1)
self.actor_linear2 = nn.Linear(hidden_size, num_actions)
def forward(self, state):
x = F.relu(self.fc(state))
value = self.critic_linear2(x)
        policy_dist = F.softmax(self.actor_linear2(x), dim=1)
return value, policy_dist
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_states': 4, 'num_actions': 4, 'hidden_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
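# Sketch: the kernel above fuses the bias add with ReLU and also stores the boolean
# mask of zeroed positions that the backward pass will reuse.  Eager equivalent of the
# mask (illustrative only):
import torch
_x = torch.randn(4, 4)
_y = torch.relu(_x)
_mask = _y <= 0.0        # positions whose incoming gradient is dropped in backward
assert torch.equal(_mask, _x <= 0.0)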
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 4), (4, 1))
assert_size_stride(primals_5, (1,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf5
return reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0
), buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0
), buf6, primals_6, primals_4, buf7
class ActorCriticNew(nn.Module):
def __init__(self, num_states, num_actions, hidden_size):
super(ActorCriticNew, self).__init__()
self.num_actions = num_actions
self.fc = nn.Linear(num_states, hidden_size)
self.critic_linear2 = nn.Linear(hidden_size, 1)
self.actor_linear2 = nn.Linear(hidden_size, num_actions)
def forward(self, input_0):
primals_1 = self.fc.weight
primals_2 = self.fc.bias
primals_4 = self.critic_linear2.weight
primals_5 = self.critic_linear2.bias
primals_6 = self.actor_linear2.weight
primals_7 = self.actor_linear2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1]
|
Johnsonms/NNI_master
|
ActorCritic
| false
| 11,593
|
[
"MIT"
] | 0
|
e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
https://github.com/Johnsonms/NNI_master/tree/e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
MSELoss
|
import torch
import torch.nn as nn
class MSELoss(nn.Module):
""" Mean-squared error loss """
def __init__(self, reduction='mean', eps=1e-08):
super().__init__()
if reduction not in ('mean', 'sum'):
raise ValueError(
'`reduction` not recognized. must be "mean" or "sum"')
self.reduction = reduction
self.eps = eps
def forward(self, pred, target):
loss = (target - pred) ** 2
loss = torch.mean(loss, 1)
if self.reduction == 'mean':
loss = torch.sum(loss) / len(pred)
elif self.reduction == 'sum':
loss = torch.sum(loss)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_mean_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp4 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp9 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp10 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp14 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp15 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = 4.0
tmp20 = tmp18 / tmp19
tmp21 = tl.broadcast_to(tmp20, [XBLOCK, RBLOCK])
tmp23 = tl.sum(tmp21, 1)[:, None]
tmp24 = 0.25
tmp25 = tmp23 * tmp24
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp25, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_mean_pow_sub_sum_0[grid(1)](buf1, arg0_1,
arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class MSELossNew(nn.Module):
""" Mean-squared error loss """
def __init__(self, reduction='mean', eps=1e-08):
super().__init__()
if reduction not in ('mean', 'sum'):
raise ValueError(
'`reduction` not recognized. must be "mean" or "sum"')
self.reduction = reduction
self.eps = eps
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
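# Sketch (assumes a CUDA device and a PyTorch/Triton build matching the generated code
# above): quick parity check between the eager module and the compiled one on the
# shapes produced by get_inputs().
if torch.cuda.is_available():
    _a, _b = (t.cuda() for t in get_inputs())
    assert torch.allclose(MSELoss()(_a, _b), MSELossNew()(_a, _b), atol=1e-6)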
|
KAGRA-TW-ML/deepclean-prod
|
MSELoss
| false
| 11,594
|
[
"MIT"
] | 0
|
9fb834cb4027fd3b377bc0e763c237235c98eabd
|
https://github.com/KAGRA-TW-ML/deepclean-prod/tree/9fb834cb4027fd3b377bc0e763c237235c98eabd
|
PosLinear
|
import torch
from torch import Tensor
from torch.utils.data import Dataset as Dataset
import torch.nn as nn
import torch.utils.data
class PosLinear(torch.nn.Linear):
def forward(self, x: 'Tensor') ->Tensor:
gain = 1 / x.size(1)
return nn.functional.linear(x, torch.nn.functional.softplus(self.
weight), self.bias) * gain
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.utils.data import Dataset as Dataset
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_softplus_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 20.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = libdevice.log1p(tmp3)
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x0, tmp5, xmask)
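# Sketch: the kernel above mirrors F.softplus's default threshold of 20 -- for inputs
# above the threshold it returns x directly instead of log1p(exp(x)), avoiding
# overflow.  Eager reference (illustrative only):
import torch
_x = torch.tensor([-5.0, 0.0, 25.0])
_manual = torch.where(_x > 20.0, _x, torch.log1p(torch.exp(_x)))
assert torch.allclose(_manual, torch.nn.functional.softplus(_x))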
@triton.jit
def triton_poi_fused_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.25
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_softplus_0[grid(16)](primals_2, buf0, 16, XBLOCK=
16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(buf0, (4, 4), (1, 4), 0), out=buf1)
del buf0
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused_mul_1[grid(256)](buf2, primals_3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_3
return buf2, primals_2, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0)
class PosLinearNew(torch.nn.Linear):
def forward(self, input_0):
primals_2 = self.weight
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
JunLi-Galios/CP-Flow
|
PosLinear
| false
| 11,595
|
[
"MIT"
] | 0
|
69272636c8c644ce3c96bbc4d610591756b8e3ff
|
https://github.com/JunLi-Galios/CP-Flow/tree/69272636c8c644ce3c96bbc4d610591756b8e3ff
|
PFLDLoss
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
class PFLDLoss(nn.Module):
"""Weighted loss of L2 distance with the pose angle for PFLD."""
def __init__(self):
super(PFLDLoss, self).__init__()
def forward(self, landmark_gt, euler_angle_gt, angle, landmarks):
"""
Calculate weighted L2 loss for PFLD.
Parameters
----------
landmark_gt : tensor
the ground truth of landmarks
euler_angle_gt : tensor
the ground truth of pose angle
angle : tensor
the predicted pose angle
        landmarks : tensor
the predicted landmarks
Returns
-------
output: tensor
the weighted L2 loss
output: tensor
the normal L2 loss
"""
weight_angle = torch.sum(1 - torch.cos(angle - euler_angle_gt), axis=1)
l2_distant = torch.sum((landmark_gt - landmarks) ** 2, axis=1)
return torch.mean(weight_angle * l2_distant), torch.mean(l2_distant)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
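# Sketch (eager PyTorch only, shapes from get_inputs() above): the axis=1 reductions
# collapse the channel dimension, leaving per-sample maps that are combined so the L2
# landmark error is up-weighted where the predicted pose angle is far from the target.
_lm_gt, _euler_gt, _angle, _lm = get_inputs()
_w = torch.sum(1 - torch.cos(_angle - _euler_gt), axis=1)
_l2 = torch.sum((_lm_gt - _lm) ** 2, axis=1)
assert _w.shape == _l2.shape == (4, 4, 4)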
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_cos_mean_mul_pow_rsub_sub_sum_0(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK:
tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp4 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp9 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp10 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp14 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp15 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp19 = tl.load(in_ptr2 + (r0 + 64 * r1), None)
tmp20 = tl.load(in_ptr3 + (r0 + 64 * r1), None)
tmp25 = tl.load(in_ptr2 + (16 + r0 + 64 * r1), None)
tmp26 = tl.load(in_ptr3 + (16 + r0 + 64 * r1), None)
tmp31 = tl.load(in_ptr2 + (32 + r0 + 64 * r1), None)
tmp32 = tl.load(in_ptr3 + (32 + r0 + 64 * r1), None)
tmp37 = tl.load(in_ptr2 + (48 + r0 + 64 * r1), None)
tmp38 = tl.load(in_ptr3 + (48 + r0 + 64 * r1), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp21 = tmp19 - tmp20
tmp22 = tl_math.cos(tmp21)
tmp23 = 1.0
tmp24 = tmp23 - tmp22
tmp27 = tmp25 - tmp26
tmp28 = tl_math.cos(tmp27)
tmp29 = tmp23 - tmp28
tmp30 = tmp24 + tmp29
tmp33 = tmp31 - tmp32
tmp34 = tl_math.cos(tmp33)
tmp35 = tmp23 - tmp34
tmp36 = tmp30 + tmp35
tmp39 = tmp37 - tmp38
tmp40 = tl_math.cos(tmp39)
tmp41 = tmp23 - tmp40
tmp42 = tmp36 + tmp41
tmp43 = tmp42 * tmp18
tmp44 = tl.broadcast_to(tmp43, [XBLOCK, RBLOCK])
tmp46 = tl.sum(tmp44, 1)[:, None]
tmp47 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
tmp49 = tl.sum(tmp47, 1)[:, None]
tmp50 = 64.0
tmp51 = tmp46 / tmp50
tmp52 = tmp49 / tmp50
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp51, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp52, None)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf2
del buf2
buf5 = buf3
del buf3
get_raw_stream(0)
triton_per_fused_cos_mean_mul_pow_rsub_sub_sum_0[grid(1)](buf4,
buf5, arg2_1, arg3_1, arg0_1, arg1_1, 1, 64, XBLOCK=1,
num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return buf4, buf5
class PFLDLossNew(nn.Module):
"""Weighted loss of L2 distance with the pose angle for PFLD."""
def __init__(self):
super(PFLDLossNew, self).__init__()
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0], output[1]
|
Johnsonms/NNI_master
|
PFLDLoss
| false
| 11,596
|
[
"MIT"
] | 0
|
e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
https://github.com/Johnsonms/NNI_master/tree/e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
NoiseInjection
|
import torch
from torch import nn
class NoiseInjection(nn.Module):
def __init__(self, channel):
super().__init__()
self.weight = nn.Parameter(torch.zeros(1, channel, 1, 1))
def forward(self, image, noise):
return image + self.weight * noise
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channel': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x3, xmask)
tmp3 = tmp1 * tmp2
tmp4 = tmp0 + tmp3
tl.store(out_ptr0 + x3, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(256)](primals_3, primals_1,
primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_3
return buf0, primals_2
class NoiseInjectionNew(nn.Module):
def __init__(self, channel):
super().__init__()
self.weight = nn.Parameter(torch.zeros(1, channel, 1, 1))
def forward(self, input_0, input_1):
primals_1 = self.weight
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
|
KUMartin77/AAA738_StyleGAN_pytorch
|
NoiseInjection
| false
| 11,597
|
[
"BSD-2-Clause"
] | 0
|
ed0689102c922d336f53e374e8be2ab532a84ccd
|
https://github.com/KUMartin77/AAA738_StyleGAN_pytorch/tree/ed0689102c922d336f53e374e8be2ab532a84ccd
|
wide_basic
|
import torch
import torch.nn as nn
def get_norm(n_filters, norm):
if norm is None:
return Identity()
elif norm == 'batch':
return nn.BatchNorm2d(n_filters, momentum=0.9)
elif norm == 'instance':
return nn.InstanceNorm2d(n_filters, affine=True)
elif norm == 'layer':
return nn.GroupNorm(1, n_filters)
elif norm == 'act':
return norms.ActNorm(n_filters, False)
class Identity(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, x):
return x
class wide_basic(nn.Module):
def __init__(self, in_planes, planes, dropout_rate, stride=1, norm=None,
leak=0.2):
super(wide_basic, self).__init__()
self.lrelu = nn.LeakyReLU(leak)
self.bn1 = get_norm(in_planes, norm)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1,
bias=True)
self.dropout = Identity() if dropout_rate == 0.0 else nn.Dropout(p=
dropout_rate)
self.bn2 = get_norm(planes, norm)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=True)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
self.shortcut = nn.Sequential(nn.Conv2d(in_planes, planes,
kernel_size=1, stride=stride, bias=True))
def forward(self, x):
out = self.dropout(self.conv1(self.lrelu(self.bn1(x))))
out = self.conv2(self.lrelu(self.bn2(out)))
out += self.shortcut(x)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_planes': 4, 'planes': 4, 'dropout_rate': 0.5}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x0, tmp5, xmask)
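# Sketch: the kernel above is the eager op F.leaky_relu(x, negative_slope=0.2),
# i.e. where(x > 0, x, 0.2 * x).  Reference check (illustrative only):
import torch
_x = torch.randn(8)
assert torch.allclose(torch.where(_x > 0, _x, 0.2 * _x),
    torch.nn.functional.leaky_relu(_x, negative_slope=0.2))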
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x3, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(256)](primals_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_convolution_leaky_relu_1[grid(256)](buf1,
primals_3, buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf1
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_add_convolution_2[grid(256)](buf5, primals_5,
primals_1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_5
return buf5, primals_2, primals_4, buf0, buf2, buf3
def get_norm(n_filters, norm):
if norm is None:
return Identity()
elif norm == 'batch':
return nn.BatchNorm2d(n_filters, momentum=0.9)
elif norm == 'instance':
return nn.InstanceNorm2d(n_filters, affine=True)
elif norm == 'layer':
return nn.GroupNorm(1, n_filters)
elif norm == 'act':
return norms.ActNorm(n_filters, False)
class Identity(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, x):
return x
class wide_basicNew(nn.Module):
def __init__(self, in_planes, planes, dropout_rate, stride=1, norm=None,
leak=0.2):
super(wide_basicNew, self).__init__()
self.lrelu = nn.LeakyReLU(leak)
self.bn1 = get_norm(in_planes, norm)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1,
bias=True)
self.dropout = Identity() if dropout_rate == 0.0 else nn.Dropout(p=
dropout_rate)
self.bn2 = get_norm(planes, norm)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=True)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
self.shortcut = nn.Sequential(nn.Conv2d(in_planes, planes,
kernel_size=1, stride=stride, bias=True))
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
JunLi-Galios/JEM
|
wide_basic
| false
| 11,598
|
[
"Apache-2.0"
] | 0
|
dd4d33f64269d3999458f129ac83a3043ad7e63f
|
https://github.com/JunLi-Galios/JEM/tree/dd4d33f64269d3999458f129ac83a3043ad7e63f
|
Softplus
|
import torch
import numpy as np
from torch.utils.data import Dataset as Dataset
import torch.nn as nn
import torch.utils.data
def activation_shifting(activation):
def shifted_activation(x):
return activation(x) - activation(torch.zeros_like(x))
return shifted_activation
def cauchy_softplus(x):
pi = np.pi
return (x * pi - torch.log(x ** 2 + 1) + 2 * x * torch.atan(x)) / (2 * pi)
def gaussian_softplus(x):
z = np.sqrt(np.pi / 2)
return (z * x * torch.erf(x / np.sqrt(2)) + torch.exp(-x ** 2 / 2) + z * x
) / (2 * z)
def gaussian_softplus2(x):
z = np.sqrt(np.pi / 2)
return (z * x * torch.erf(x / np.sqrt(2)) + torch.exp(-x ** 2 / 2) + z * x
) / z
def get_softplus(softplus_type='softplus', zero_softplus=False):
if softplus_type == 'softplus':
act = nn.functional.softplus
elif softplus_type == 'gaussian_softplus':
act = gaussian_softplus
elif softplus_type == 'gaussian_softplus2':
act = gaussian_softplus2
elif softplus_type == 'laplace_softplus':
act = gaussian_softplus
elif softplus_type == 'cauchy_softplus':
act = cauchy_softplus
else:
raise NotImplementedError(
f'softplus type {softplus_type} not supported.')
if zero_softplus:
act = activation_shifting(act)
return act
class Softplus(nn.Module):
def __init__(self, softplus_type='softplus', zero_softplus=False):
super(Softplus, self).__init__()
self.softplus_type = softplus_type
self.zero_softplus = zero_softplus
def forward(self, x):
return get_softplus(self.softplus_type, self.zero_softplus)(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
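# Sketch: with zero_softplus=True the chosen activation is shifted by its value at 0,
# so every variant satisfies f(0) == 0.  Quick check (illustrative only):
_act = get_softplus('gaussian_softplus', zero_softplus=True)
assert torch.allclose(_act(torch.zeros(3)), torch.zeros(3))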
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import numpy as np
from torch.utils.data import Dataset as Dataset
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_softplus_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 20.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = libdevice.log1p(tmp3)
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_softplus_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
def activation_shifting(activation):
def shifted_activation(x):
return activation(x) - activation(torch.zeros_like(x))
return shifted_activation
def cauchy_softplus(x):
pi = np.pi
return (x * pi - torch.log(x ** 2 + 1) + 2 * x * torch.atan(x)) / (2 * pi)
def gaussian_softplus(x):
z = np.sqrt(np.pi / 2)
return (z * x * torch.erf(x / np.sqrt(2)) + torch.exp(-x ** 2 / 2) + z * x
) / (2 * z)
def gaussian_softplus2(x):
z = np.sqrt(np.pi / 2)
return (z * x * torch.erf(x / np.sqrt(2)) + torch.exp(-x ** 2 / 2) + z * x
) / z
def get_softplus(softplus_type='softplus', zero_softplus=False):
if softplus_type == 'softplus':
act = nn.functional.softplus
elif softplus_type == 'gaussian_softplus':
act = gaussian_softplus
elif softplus_type == 'gaussian_softplus2':
act = gaussian_softplus2
elif softplus_type == 'laplace_softplus':
act = gaussian_softplus
elif softplus_type == 'cauchy_softplus':
act = cauchy_softplus
else:
raise NotImplementedError(
f'softplus type {softplus_type} not supported.')
if zero_softplus:
act = activation_shifting(act)
return act
class SoftplusNew(nn.Module):
def __init__(self, softplus_type='softplus', zero_softplus=False):
super(SoftplusNew, self).__init__()
self.softplus_type = softplus_type
self.zero_softplus = zero_softplus
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
JunLi-Galios/CP-Flow
|
Softplus
| false
| 11,599
|
[
"MIT"
] | 0
|
69272636c8c644ce3c96bbc4d610591756b8e3ff
|
https://github.com/JunLi-Galios/CP-Flow/tree/69272636c8c644ce3c96bbc4d610591756b8e3ff
|
PosConv2d
|
import torch
from torch import Tensor
from torch.utils.data import Dataset as Dataset
import torch.nn.init as init
import torch.utils.data
class PosConv2d(torch.nn.Conv2d):
def reset_parameters(self) ->None:
super().reset_parameters()
self.fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
def forward(self, x: 'Tensor') ->Tensor:
return self._conv_forward(x, torch.nn.functional.softplus(self.
weight), self.bias) / self.fan_in
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.utils.data import Dataset as Dataset
import torch.nn.init as init
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_softplus_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 20.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = libdevice.log1p(tmp3)
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_convolution_div_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.015625
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
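# Sketch: the constant 0.015625 above is 1 / fan_in for this configuration
# (fan_in = in_channels * kernel_size**2 = 4 * 4 * 4 = 64), matching the division by
# self.fan_in in PosConv2d.forward.
assert 0.015625 == 1 / (4 * 4 * 4)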
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_softplus_0[grid(256)](primals_1, buf0, 256, XBLOCK
=256, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(primals_3, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_div_1[grid(16)](buf2, primals_2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf2, primals_1, primals_3, buf0
class PosConv2dNew(torch.nn.Conv2d):
def reset_parameters(self) ->None:
super().reset_parameters()
self.fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
JunLi-Galios/CP-Flow
|
PosConv2d
| false
| 11,600
|
[
"MIT"
] | 0
|
69272636c8c644ce3c96bbc4d610591756b8e3ff
|
https://github.com/JunLi-Galios/CP-Flow/tree/69272636c8c644ce3c96bbc4d610591756b8e3ff
|
EqualConv2d
|
import torch
from torch import nn
from math import sqrt
def equal_lr(module, name='weight'):
EqualLR.apply(module, name)
return module
class EqualLR:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
fn = EqualLR(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight = self.compute_weight(module)
setattr(module, self.name, weight)
class EqualConv2d(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
conv = nn.Conv2d(*args, **kwargs)
conv.weight.data.normal_()
conv.bias.data.zero_()
self.conv = equal_lr(conv)
def forward(self, input):
return self.conv(input)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from math import sqrt
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.1767766952966369
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
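# Sketch: the constant 0.1767766952966369 above is sqrt(2 / fan_in) with
# fan_in = in_channels * kernel_size**2 = 4 * 16 = 64 -- the equalized learning-rate
# scale that EqualLR.compute_weight applies to weight_orig.
from math import sqrt
assert abs(sqrt(2 / 64) - 0.1767766952966369) < 1e-12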
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(primals_3, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(16)](buf2, primals_2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf2, buf0, primals_3, buf0
def equal_lr(module, name='weight'):
EqualLR.apply(module, name)
return module
class EqualLR:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
fn = EqualLR(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight = self.compute_weight(module)
setattr(module, self.name, weight)
class EqualConv2dNew(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
conv = nn.Conv2d(*args, **kwargs)
conv.weight.data.normal_()
conv.bias.data.zero_()
self.conv = equal_lr(conv)
def forward(self, input_0):
primals_2 = self.conv.bias
primals_1 = self.conv.weight_orig
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
KUMartin77/AAA738_StyleGAN_pytorch
|
EqualConv2d
| false
| 11,601
|
[
"BSD-2-Clause"
] | 0
|
ed0689102c922d336f53e374e8be2ab532a84ccd
|
https://github.com/KUMartin77/AAA738_StyleGAN_pytorch/tree/ed0689102c922d336f53e374e8be2ab532a84ccd
|
SoftCrossEntropyLoss
|
import torch
import torch.utils.data
class SoftCrossEntropyLoss(torch.nn.Module):
"""SoftCrossEntropyLoss (useful for label smoothing and mixup).
Identical to torch.nn.CrossEntropyLoss if used with one-hot labels."""
def __init__(self):
super(SoftCrossEntropyLoss, self).__init__()
def forward(self, x, y):
loss = -y * torch.nn.functional.log_softmax(x, -1)
return torch.sum(loss) / x.shape[0]
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
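# Sketch (eager PyTorch only) verifying the docstring claim: with one-hot targets the
# loss matches torch.nn.functional.cross_entropy (mean reduction over the batch).
_x = torch.randn(8, 5)
_idx = torch.randint(0, 5, (8,))
_y = torch.nn.functional.one_hot(_idx, 5).float()
assert torch.allclose(SoftCrossEntropyLoss()(_x, _y),
    torch.nn.functional.cross_entropy(_x, _idx), atol=1e-6)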
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
r1 = rindex // 4
tmp0 = tl.load(in_ptr0 + r2, None)
tmp2 = tl.load(in_ptr1 + r2, None)
tmp3 = tl.load(in_ptr1 + 4 * r1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (1 + 4 * r1), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * r1), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + (3 + 4 * r1), None, eviction_policy='evict_last')
tmp1 = -tmp0
tmp4 = tl_math.exp(tmp3)
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp4 + tmp6
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp12 = tl_math.exp(tmp11)
tmp13 = tmp10 + tmp12
tmp14 = tl_math.log(tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tmp1 * tmp15
tmp17 = tl.broadcast_to(tmp16, [RBLOCK])
tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0))
tmp20 = 0.25
tmp21 = tmp19 * tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused__log_softmax_div_mul_neg_sum_1[grid(1)](buf2,
arg0_1, buf0, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del buf0
return buf2,
class SoftCrossEntropyLossNew(torch.nn.Module):
"""SoftCrossEntropyLoss (useful for label smoothing and mixup).
Identical to torch.nn.CrossEntropyLoss if used with one-hot labels."""
def __init__(self):
super(SoftCrossEntropyLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
KateHaeun/pycls
|
SoftCrossEntropyLoss
| false
| 11,602
|
[
"MIT"
] | 0
|
f3d87a36cb0a8adead31c7ad98f43facf7fe4c47
|
https://github.com/KateHaeun/pycls/tree/f3d87a36cb0a8adead31c7ad98f43facf7fe4c47
|
FusedUpsample
|
import torch
from torch import nn
from torch.nn import functional as F
from math import sqrt
class FusedUpsample(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, padding=0):
super().__init__()
weight = torch.randn(in_channel, out_channel, kernel_size, kernel_size)
bias = torch.zeros(out_channel)
fan_in = in_channel * kernel_size * kernel_size
self.multiplier = sqrt(2 / fan_in)
self.weight = nn.Parameter(weight)
self.bias = nn.Parameter(bias)
self.pad = padding
def forward(self, input):
weight = F.pad(self.weight * self.multiplier, [1, 1, 1, 1])
weight = (weight[:, :, 1:, 1:] + weight[:, :, :-1, 1:] + weight[:,
:, 1:, :-1] + weight[:, :, :-1, :-1]) / 4
out = F.conv_transpose2d(input, weight, self.bias, stride=2,
padding=self.pad)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4}]
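# Sketch (eager PyTorch only): the pad-and-average of the weight in forward() is the
# same as smoothing the padded kernel with a 2x2 box filter of weight 1/4; the check
# below uses illustrative shapes, not values from the original repo.
_w = torch.randn(4, 4, 4, 4)
_wp = F.pad(_w, [1, 1, 1, 1])
_avg = (_wp[:, :, 1:, 1:] + _wp[:, :, :-1, 1:] + _wp[:, :, 1:, :-1] + _wp[:, :, :-1, :-1]) / 4
_box = torch.full((1, 1, 2, 2), 0.25)
_ref = F.conv2d(_wp.reshape(16, 1, 6, 6), _box).reshape(4, 4, 5, 5)
assert torch.allclose(_avg, _ref, atol=1e-6)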
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from math import sqrt
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 5 % 5
x0 = xindex % 5
x2 = xindex // 25
x4 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0
)
tmp12 = 0.1767766952966369
tmp13 = tmp11 * tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp10, tmp13, tmp14)
tmp16 = -1 + x1
tmp17 = tmp16 >= tmp1
tmp18 = tmp16 < tmp3
tmp19 = tmp17 & tmp18
tmp20 = tmp19 & tmp6
tmp21 = tmp20 & tmp7
tmp22 = tl.load(in_ptr0 + (-4 + x0 + 4 * x1 + 16 * x2), tmp21 & xmask,
other=0.0)
tmp23 = tmp22 * tmp12
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp21, tmp23, tmp24)
tmp26 = tmp15 + tmp25
tmp27 = -1 + x0
tmp28 = tmp27 >= tmp1
tmp29 = tmp27 < tmp3
tmp30 = tmp8 & tmp28
tmp31 = tmp30 & tmp29
tmp32 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1 + 16 * x2), tmp31 & xmask,
other=0.0)
tmp33 = tmp32 * tmp12
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp31, tmp33, tmp34)
tmp36 = tmp26 + tmp35
tmp37 = tmp19 & tmp28
tmp38 = tmp37 & tmp29
tmp39 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp38 & xmask,
other=0.0)
tmp40 = tmp39 * tmp12
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp36 + tmp42
tmp44 = 0.25
tmp45 = tmp43 * tmp44
tl.store(in_out_ptr0 + x4, tmp45, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 1936
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 121 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_div_0[grid(400)](buf1, primals_1, 400, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_1
buf2 = extern_kernels.convolution(primals_3, buf1, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 11, 11), (484, 121, 11, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(1936)](buf3, primals_2, 1936,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf3, primals_3, buf1
class FusedUpsampleNew(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, padding=0):
super().__init__()
weight = torch.randn(in_channel, out_channel, kernel_size, kernel_size)
bias = torch.zeros(out_channel)
fan_in = in_channel * kernel_size * kernel_size
self.multiplier = sqrt(2 / fan_in)
self.weight = nn.Parameter(weight)
self.bias = nn.Parameter(bias)
self.pad = padding
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
KUMartin77/AAA738_StyleGAN_pytorch
|
FusedUpsample
| false
| 11,603
|
[
"BSD-2-Clause"
] | 0
|
ed0689102c922d336f53e374e8be2ab532a84ccd
|
https://github.com/KUMartin77/AAA738_StyleGAN_pytorch/tree/ed0689102c922d336f53e374e8be2ab532a84ccd
|
AdaptiveInstanceNorm
|
import torch
from torch import nn
from math import sqrt
def equal_lr(module, name='weight'):
EqualLR.apply(module, name)
return module
class EqualLR:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
fn = EqualLR(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight = self.compute_weight(module)
setattr(module, self.name, weight)
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
linear = nn.Linear(in_dim, out_dim)
linear.weight.data.normal_()
linear.bias.data.zero_()
self.linear = equal_lr(linear)
def forward(self, input):
return self.linear(input)
class AdaptiveInstanceNorm(nn.Module):
def __init__(self, in_channel, style_dim):
super().__init__()
self.norm = nn.InstanceNorm2d(in_channel)
self.style = EqualLinear(style_dim, in_channel * 2)
self.style.linear.bias.data[:in_channel] = 1
self.style.linear.bias.data[in_channel:] = 0
def forward(self, input, style):
style = self.style(style).unsqueeze(2).unsqueeze(3)
gamma, beta = style.chunk(2, 1)
out = self.norm(input)
out = gamma * out + beta
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4, 'style_dim': 4}]
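# Sketch of the AdaIN modulation itself (eager PyTorch only, illustrative shapes): the
# style vector predicts a per-channel gamma and beta that rescale and shift the
# instance-normalized feature map.
_x = torch.rand(2, 4, 8, 8)
_gamma = torch.rand(2, 4, 1, 1)
_beta = torch.rand(2, 4, 1, 1)
_out = _gamma * nn.functional.instance_norm(_x) + _beta
assert _out.shape == _x.shape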
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
from math import sqrt
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.7071067811865476
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_mul_1(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = xindex // 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp22 = tl.load(in_ptr1 + (x2 + 8 * x3), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + (4 + x2 + 8 * x3), xmask, eviction_policy=
'evict_last')
tmp29 = tl.load(in_ptr2 + (4 + x2), xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 16.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp24 = tmp22 + tmp23
tmp25 = tmp0 - tmp10
tmp26 = tmp25 * tmp21
tmp27 = tmp24 * tmp26
tmp30 = tmp28 + tmp29
tmp31 = tmp27 + tmp30
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(out_ptr1 + (r1 + 16 * x0), tmp31, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((8, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(32)](primals_1, buf0, 32, XBLOCK=32,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(buf0, (4, 8), (1, 4
), 0), out=buf1)
buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf3 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf5 = reinterpret_tensor(buf3, (1, 16, 1, 1), (16, 1, 1, 1), 0)
del buf3
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_per_fused__native_batch_norm_legit_add_mul_1[grid(16)](buf5,
primals_4, buf1, primals_2, buf2, buf6, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del buf1
del primals_2
return buf6, buf0, primals_3, primals_4, buf2, buf5
def equal_lr(module, name='weight'):
EqualLR.apply(module, name)
return module
class EqualLR:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
fn = EqualLR(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight = self.compute_weight(module)
setattr(module, self.name, weight)
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
linear = nn.Linear(in_dim, out_dim)
linear.weight.data.normal_()
linear.bias.data.zero_()
self.linear = equal_lr(linear)
def forward(self, input):
return self.linear(input)
class AdaptiveInstanceNormNew(nn.Module):
def __init__(self, in_channel, style_dim):
super().__init__()
self.norm = nn.InstanceNorm2d(in_channel)
self.style = EqualLinear(style_dim, in_channel * 2)
self.style.linear.bias.data[:in_channel] = 1
self.style.linear.bias.data[in_channel:] = 0
def forward(self, input_0, input_1):
primals_2 = self.style.linear.bias
primals_1 = self.style.linear.weight_orig
primals_4 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
KUMartin77/AAA738_StyleGAN_pytorch
|
AdaptiveInstanceNorm
| false
| 11,604
|
[
"BSD-2-Clause"
] | 0
|
ed0689102c922d336f53e374e8be2ab532a84ccd
|
https://github.com/KUMartin77/AAA738_StyleGAN_pytorch/tree/ed0689102c922d336f53e374e8be2ab532a84ccd
|
ResHead
|
from torch.nn import Module
import torch
import torch.utils.data
import torch.nn as nn
def gap2d(_w_in):
"""Helper for building a gap2d layer."""
return nn.AdaptiveAvgPool2d((1, 1))
def gap2d_cx(cx, _w_in):
"""Accumulates complexity of gap2d into cx = (h, w, flops, params, acts)."""
flops, params, acts = cx['flops'], cx['params'], cx['acts']
return {'h': 1, 'w': 1, 'flops': flops, 'params': params, 'acts': acts}
def linear(w_in, w_out, *, bias=False):
"""Helper for building a linear layer."""
return nn.Linear(w_in, w_out, bias=bias)
def linear_cx(cx, w_in, w_out, *, bias=False):
"""Accumulates complexity of linear into cx = (h, w, flops, params, acts)."""
h, w, flops, params, acts = cx['h'], cx['w'], cx['flops'], cx['params'
], cx['acts']
flops += w_in * w_out + (w_out if bias else 0)
params += w_in * w_out + (w_out if bias else 0)
acts += w_out
return {'h': h, 'w': w, 'flops': flops, 'params': params, 'acts': acts}
class ResHead(Module):
"""ResNet head: AvgPool, 1x1."""
def __init__(self, w_in, num_classes):
super(ResHead, self).__init__()
self.avg_pool = gap2d(w_in)
self.fc = linear(w_in, num_classes, bias=True)
def forward(self, x):
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
@staticmethod
def complexity(cx, w_in, num_classes):
cx = gap2d_cx(cx, w_in)
cx = linear_cx(cx, w_in, num_classes, bias=True)
return cx
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'w_in': 4, 'num_classes': 4}]
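# Hedged worked example of the complexity bookkeeping above (the starting values
# are made up for illustration): gap2d_cx collapses h and w to 1 without adding
# cost, and linear_cx with bias adds w_in * w_out + w_out flops/params and w_out acts.
cx = {'h': 4, 'w': 4, 'flops': 0, 'params': 0, 'acts': 0}
cx = ResHead.complexity(cx, w_in=4, num_classes=4)
assert cx == {'h': 1, 'w': 1, 'flops': 20, 'params': 20, 'acts': 4}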
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del primals_1
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(buf1, (4, 4), (4,
1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha
=1, beta=1, out=buf2)
del primals_2
del primals_3
return buf2, reinterpret_tensor(buf1, (4, 4), (4, 1), 0)
def gap2d(_w_in):
"""Helper for building a gap2d layer."""
return nn.AdaptiveAvgPool2d((1, 1))
def gap2d_cx(cx, _w_in):
"""Accumulates complexity of gap2d into cx = (h, w, flops, params, acts)."""
flops, params, acts = cx['flops'], cx['params'], cx['acts']
return {'h': 1, 'w': 1, 'flops': flops, 'params': params, 'acts': acts}
def linear(w_in, w_out, *, bias=False):
"""Helper for building a linear layer."""
return nn.Linear(w_in, w_out, bias=bias)
def linear_cx(cx, w_in, w_out, *, bias=False):
"""Accumulates complexity of linear into cx = (h, w, flops, params, acts)."""
h, w, flops, params, acts = cx['h'], cx['w'], cx['flops'], cx['params'
], cx['acts']
flops += w_in * w_out + (w_out if bias else 0)
params += w_in * w_out + (w_out if bias else 0)
acts += w_out
return {'h': h, 'w': w, 'flops': flops, 'params': params, 'acts': acts}
class ResHeadNew(Module):
"""ResNet head: AvgPool, 1x1."""
def __init__(self, w_in, num_classes):
super(ResHeadNew, self).__init__()
self.avg_pool = gap2d(w_in)
self.fc = linear(w_in, num_classes, bias=True)
@staticmethod
def complexity(cx, w_in, num_classes):
cx = gap2d_cx(cx, w_in)
cx = linear_cx(cx, w_in, num_classes, bias=True)
return cx
def forward(self, input_0):
primals_2 = self.fc.weight
primals_3 = self.fc.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
KateHaeun/pycls
|
ResHead
| false
| 11,605
|
[
"MIT"
] | 0
|
f3d87a36cb0a8adead31c7ad98f43facf7fe4c47
|
https://github.com/KateHaeun/pycls/tree/f3d87a36cb0a8adead31c7ad98f43facf7fe4c47
|
EqualLinear
|
import torch
from torch import nn
from math import sqrt
def equal_lr(module, name='weight'):
EqualLR.apply(module, name)
return module
class EqualLR:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
fn = EqualLR(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight = self.compute_weight(module)
setattr(module, self.name, weight)
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
linear = nn.Linear(in_dim, out_dim)
linear.weight.data.normal_()
linear.bias.data.zero_()
self.linear = equal_lr(linear)
def forward(self, input):
return self.linear(input)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'out_dim': 4}]
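# Hedged check of the equalized learning-rate trick above (added for
# illustration): equal_lr() renames the parameter to weight_orig and a forward
# pre-hook rebuilds weight = weight_orig * sqrt(2 / fan_in) on every call.
lin = EqualLinear(4, 4)
x = torch.rand(2, 4)
y = lin(x)
scale = sqrt(2 / 4)  # fan_in = in_dim * 1 for a Linear weight
expected = x @ (lin.linear.weight_orig * scale).t() + lin.linear.bias
assert torch.allclose(y, expected, atol=1e-6)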
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from math import sqrt
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.7071067811865476
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf1)
del primals_2
return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf0, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)
def equal_lr(module, name='weight'):
EqualLR.apply(module, name)
return module
class EqualLR:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
fn = EqualLR(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight = self.compute_weight(module)
setattr(module, self.name, weight)
class EqualLinearNew(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
linear = nn.Linear(in_dim, out_dim)
linear.weight.data.normal_()
linear.bias.data.zero_()
self.linear = equal_lr(linear)
def forward(self, input_0):
primals_2 = self.linear.bias
primals_1 = self.linear.weight_orig
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
KUMartin77/AAA738_StyleGAN_pytorch
|
EqualLinear
| false
| 11,606
|
[
"BSD-2-Clause"
] | 0
|
ed0689102c922d336f53e374e8be2ab532a84ccd
|
https://github.com/KUMartin77/AAA738_StyleGAN_pytorch/tree/ed0689102c922d336f53e374e8be2ab532a84ccd
|
TransformerEncoderLayer
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.utils.data
import torch.distributions
class TransformerEncoderLayer(nn.Module):
def __init__(self, embed_dim, num_heads, hidden_size, dropout=0.0,
attention_dropout=0.0, activation_dropout=0.0):
super().__init__()
self.embed_dim = embed_dim
self.self_attn = torch.nn.MultiheadAttention(embed_dim=self.
embed_dim, num_heads=num_heads, dropout=attention_dropout)
self.self_attn_layer_norm = torch.nn.LayerNorm(self.embed_dim)
self.dropout = dropout
self.activation_dropout = activation_dropout
self.normalize_before = True
self.fc1 = torch.nn.Linear(self.embed_dim, hidden_size)
self.fc2 = torch.nn.Linear(hidden_size, self.embed_dim)
self.layer_norm = torch.nn.LayerNorm(self.embed_dim)
self.init_parameters()
def forward(self, x, key_padding_mask=None, attn_mask=None):
residual = x
x = self.self_attn_layer_norm(x)
x, _att = self.self_attn(query=x, key=x, value=x, key_padding_mask=
key_padding_mask, attn_mask=attn_mask)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
residual = x
x = self.layer_norm(x)
x = F.relu(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
return x
def init_parameters(self):
nn.init.xavier_uniform_(self.fc1.weight)
nn.init.constant_(self.fc1.bias, 0.0)
nn.init.xavier_uniform_(self.fc2.weight)
nn.init.constant_(self.fc2.bias, 0.0)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'embed_dim': 4, 'num_heads': 4, 'hidden_size': 4}]
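# Hedged usage sketch (added for illustration): the pre-norm residual layer
# preserves the input shape; a (seq_len, batch, embed_dim) layout is assumed
# here, which is the default for torch.nn.MultiheadAttention.
layer = TransformerEncoderLayer(embed_dim=4, num_heads=4, hidden_size=4)
x = torch.rand(5, 2, 4)
y = layer(x)
assert y.shape == x.shape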
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import torch.distributions
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_out_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(4)](primals_1, buf0, buf1,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(16)](primals_1, buf0,
buf1, primals_2, primals_3, buf2, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del primals_2
del primals_3
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4
), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 4),
buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=
1, beta=1, out=buf4)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 8),
buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=
1, beta=1, out=buf5)
buf6 = reinterpret_tensor(buf3, (4, 4, 1), (1, 4, 16), 0)
del buf3
triton_poi_fused_mul_2[grid(16)](buf6, primals_5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf6, reinterpret_tensor(buf4, (4, 1, 4), (1, 1,
4), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(64)](buf7, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf9 = buf7
del buf7
triton_poi_fused__softmax_4[grid(64)](buf8, buf9, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf8
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf9, reinterpret_tensor(buf5, (4, 4, 1), (1, 4,
1), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_clone_5[grid(4, 4)](buf10, buf11, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf12 = reinterpret_tensor(buf10, (4, 4), (4, 1), 0)
del buf10
extern_kernels.addmm(primals_7, reinterpret_tensor(buf11, (4, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf12)
del primals_7
buf13 = buf1
del buf1
buf14 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_6[grid(4)](primals_1, buf12,
buf13, buf14, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_7[grid(16)](primals_1, buf12,
buf13, buf14, primals_8, primals_9, buf15, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf13
del buf14
del primals_9
buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf15, reinterpret_tensor(primals_10, (4, 4), (1,
4), 0), out=buf16)
buf17 = buf16
del buf16
triton_poi_fused_relu_8[grid(16)](buf17, primals_11, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_11
buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf17, reinterpret_tensor(primals_12, (4, 4), (1,
4), 0), out=buf18)
buf19 = buf18
del buf18
triton_poi_fused_add_9[grid(16)](buf19, primals_1, buf12,
primals_13, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_13
return (buf19, primals_1, primals_8, buf2, buf9, reinterpret_tensor(
buf11, (4, 4), (4, 1), 0), buf12, buf15, buf17, primals_12,
primals_10, primals_6, reinterpret_tensor(buf5, (4, 1, 4), (1, 1, 4
), 0), reinterpret_tensor(buf6, (4, 1, 4), (1, 1, 4), 0),
reinterpret_tensor(buf4, (4, 4, 1), (1, 4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 32),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 16),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 0))
class TransformerEncoderLayerNew(nn.Module):
def __init__(self, embed_dim, num_heads, hidden_size, dropout=0.0,
attention_dropout=0.0, activation_dropout=0.0):
super().__init__()
self.embed_dim = embed_dim
self.self_attn = torch.nn.MultiheadAttention(embed_dim=self.
embed_dim, num_heads=num_heads, dropout=attention_dropout)
self.self_attn_layer_norm = torch.nn.LayerNorm(self.embed_dim)
self.dropout = dropout
self.activation_dropout = activation_dropout
self.normalize_before = True
self.fc1 = torch.nn.Linear(self.embed_dim, hidden_size)
self.fc2 = torch.nn.Linear(hidden_size, self.embed_dim)
self.layer_norm = torch.nn.LayerNorm(self.embed_dim)
self.init_parameters()
def init_parameters(self):
nn.init.xavier_uniform_(self.fc1.weight)
nn.init.constant_(self.fc1.bias, 0.0)
nn.init.xavier_uniform_(self.fc2.weight)
nn.init.constant_(self.fc2.bias, 0.0)
def forward(self, input_0):
primals_4 = self.self_attn.in_proj_weight
primals_5 = self.self_attn.in_proj_bias
primals_1 = self.self_attn.out_proj.weight
primals_2 = self.self_attn.out_proj.bias
primals_3 = self.self_attn_layer_norm.weight
primals_7 = self.self_attn_layer_norm.bias
primals_6 = self.fc1.weight
primals_8 = self.fc1.bias
primals_10 = self.fc2.weight
primals_9 = self.fc2.bias
primals_11 = self.layer_norm.weight
primals_13 = self.layer_norm.bias
primals_12 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
|
IA3005/NLP_ens
|
TransformerEncoderLayer
| false
| 11,607
|
[
"MIT"
] | 0
|
794ebbff46d5e6d5476f29b577b40bbb52991246
|
https://github.com/IA3005/NLP_ens/tree/794ebbff46d5e6d5476f29b577b40bbb52991246
|
ConvInRelu
|
import torch
import numpy as np
from torch import nn
import torch.onnx
class ConvInRelu(nn.Module):
def __init__(self, channels_in, channels_out, kernel_size, stride=1):
super(ConvInRelu, self).__init__()
self.n_params = 0
self.channels = channels_out
self.reflection_pad = nn.ReflectionPad2d(int(np.floor(kernel_size / 2))
)
self.conv = nn.Conv2d(channels_in, channels_out, kernel_size,
stride, padding=0)
self.instancenorm = nn.InstanceNorm2d(channels_out)
self.relu = nn.ReLU(inplace=False)
def forward(self, x):
x = self.reflection_pad(x)
x = self.conv(x)
x = self.instancenorm(x)
x = self.relu(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels_in': 4, 'channels_out': 4, 'kernel_size': 4}]
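# Hedged shape check (added for illustration): ReflectionPad2d(floor(4 / 2))
# grows the 4x4 input to 8x8, and the 4x4 convolution with stride 1 and no
# padding then yields 8 - 4 + 1 = 5, so even kernel sizes do not preserve H x W.
block = ConvInRelu(channels_in=4, channels_out=4, kernel_size=4)
y = block(torch.rand(4, 4, 4, 4))
assert y.shape == (4, 4, 5, 5)
assert (y >= 0).all()  # ReLU output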
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import numpy as np
from torch import nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8 % 8
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-2 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-2 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_relu_threshold_backward_1(
in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 16
rnumel = 25
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + 25 * x3), rmask & xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tl.where(rmask & xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(rmask & xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 25, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(rmask & xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = tmp2 - tmp12
tmp20 = 25.0
tmp21 = tmp18 / tmp20
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.rsqrt(tmp23)
tmp25 = tmp19 * tmp24
tmp26 = tl.full([1, 1], 0, tl.int32)
tmp27 = triton_helpers.maximum(tmp26, tmp25)
tmp28 = 0.0
tmp29 = tmp27 <= tmp28
tl.store(in_out_ptr0 + (r2 + 25 * x3), tmp2, rmask & xmask)
tl.store(out_ptr2 + (r2 + 25 * x3), tmp27, rmask & xmask)
tl.store(out_ptr3 + (r2 + 25 * x3), tmp29, rmask & xmask)
tl.store(out_ptr4 + x3, tmp24, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(1024)](primals_1, buf0,
1024, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 5, 5), (100, 25, 5, 1))
buf2 = buf1
del buf1
buf3 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf7 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.bool)
buf6 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
triton_per_fused__native_batch_norm_legit_convolution_relu_threshold_backward_1[
grid(16)](buf2, primals_3, buf3, buf7, buf8, buf6, 16, 25,
XBLOCK=8, num_warps=2, num_stages=1)
del primals_3
return buf7, primals_2, buf0, buf2, reinterpret_tensor(buf6, (16,), (1,), 0
), buf8, reinterpret_tensor(buf3, (1, 16, 1, 1), (16, 1, 1, 1), 0)
class ConvInReluNew(nn.Module):
def __init__(self, channels_in, channels_out, kernel_size, stride=1):
super(ConvInReluNew, self).__init__()
self.n_params = 0
self.channels = channels_out
self.reflection_pad = nn.ReflectionPad2d(int(np.floor(kernel_size / 2))
)
self.conv = nn.Conv2d(channels_in, channels_out, kernel_size,
stride, padding=0)
self.instancenorm = nn.InstanceNorm2d(channels_out)
self.relu = nn.ReLU(inplace=False)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_3 = self.conv.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
JuanFuriaz/donkey_share
|
ConvInRelu
| false
| 11,608
|
[
"MIT"
] | 0
|
caad831ca21094f05f9084f881ca3bbfa4168e4c
|
https://github.com/JuanFuriaz/donkey_share/tree/caad831ca21094f05f9084f881ca3bbfa4168e4c
|
FCNet
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
class FCNet(nn.Module):
def __init__(self, input_size, output_size):
super().__init__()
self.l1 = nn.Linear(input_size, 5)
self.relu = nn.ReLU()
self.l2 = nn.Linear(5, output_size)
def forward(self, x):
output = self.l1(x)
output = self.relu(output)
output = self.l2(output)
return output.view(-1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'output_size': 4}]
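# Hedged usage sketch (added for illustration): nn.Linear acts on the last
# dimension, so a (4, 4, 4, 4) input becomes (4, 4, 4, output_size) before
# view(-1) flattens it to a single vector of 4 * 4 * 4 * 4 = 256 values.
net = FCNet(input_size=4, output_size=4)
y = net(torch.rand(4, 4, 4, 4))
assert y.shape == (256,)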
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from typing import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 5
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (5, 4), (4, 1))
assert_size_stride(primals_2, (5,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 5), (5, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 5), (5, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 5), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 5), (80, 20, 5, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(320)](buf1,
primals_2, buf3, 320, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 5), (
5, 1), 0), reinterpret_tensor(primals_4, (5, 4), (1, 5), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (256,), (1,), 0), reinterpret_tensor(
primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 5), (
5, 1), 0), primals_4, buf3
class FCNetNew(nn.Module):
def __init__(self, input_size, output_size):
super().__init__()
self.l1 = nn.Linear(input_size, 5)
self.relu = nn.ReLU()
self.l2 = nn.Linear(5, output_size)
def forward(self, input_0):
primals_1 = self.l1.weight
primals_2 = self.l1.bias
primals_4 = self.l2.weight
primals_5 = self.l2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
Johnsonms/NNI_master
|
FCNet
| false
| 11,609
|
[
"MIT"
] | 0
|
e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
https://github.com/Johnsonms/NNI_master/tree/e5e5c7aed89cf3189cffe1056464833c15eb54ff
|
Classifier
|
import torch
import torch.nn as nn
class Classifier(nn.Module):
def __init__(self, n_hid, n_out):
super(Classifier, self).__init__()
self.n_hid = n_hid
self.n_out = n_out
self.linear = nn.Linear(n_hid, n_out)
def forward(self, x):
tx = self.linear(x)
return torch.log_softmax(tx.squeeze(), dim=-1)
def __repr__(self):
return '{}(n_hid={}, n_out={})'.format(self.__class__.__name__,
self.n_hid, self.n_out)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_hid': 4, 'n_out': 4}]
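# Hedged usage sketch (added for illustration): squeeze() is a no-op for the
# (4, 4, 4, 4) input above, and log_softmax over the last dim means the
# exponentiated outputs sum to one per row.
clf = Classifier(n_hid=4, n_out=4)
logp = clf(torch.rand(4, 4, 4, 4))
assert logp.shape == (4, 4, 4, 4)
assert torch.allclose(logp.exp().sum(-1), torch.ones(4, 4, 4))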
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused__log_softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del buf1
return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2
class ClassifierNew(nn.Module):
def __init__(self, n_hid, n_out):
super(ClassifierNew, self).__init__()
self.n_hid = n_hid
self.n_out = n_out
self.linear = nn.Linear(n_hid, n_out)
def __repr__(self):
return '{}(n_hid={}, n_out={})'.format(self.__class__.__name__,
self.n_hid, self.n_out)
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
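# Hedged parity sketch (added for illustration; it assumes a CUDA device because
# call() allocates CUDA buffers): with shared weights, the Triton-backed module
# should reproduce the eager Classifier shown earlier in this entry.
if torch.cuda.is_available():
    eager = Classifier(n_hid=4, n_out=4).cuda()
    fused = ClassifierNew(n_hid=4, n_out=4).cuda()
    fused.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(eager(x), fused(x), atol=1e-5)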
|
KathleenQ/context-aware-doc-analysis
|
Classifier
| false
| 11,610
|
[
"MIT"
] | 0
|
93af994b2dee09f5fe6bfcc2e76e47e74708d3fe
|
https://github.com/KathleenQ/context-aware-doc-analysis/tree/93af994b2dee09f5fe6bfcc2e76e47e74708d3fe
|
AdaptiveCatAvgMaxPool2d
|
import torch
import torch.nn as nn
import torch.utils.data
import torchvision.transforms.functional as F
import torch.nn.functional as F
import torch.nn.parallel
from torch import optim as optim
def adaptive_catavgmax_pool2d(x, output_size=1):
x_avg = F.adaptive_avg_pool2d(x, output_size)
x_max = F.adaptive_max_pool2d(x, output_size)
return torch.cat((x_avg, x_max), 1)
class AdaptiveCatAvgMaxPool2d(nn.Module):
def __init__(self, output_size=1):
super(AdaptiveCatAvgMaxPool2d, self).__init__()
self.output_size = output_size
def forward(self, x):
return adaptive_catavgmax_pool2d(x, self.output_size)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
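# Hedged usage sketch (added for illustration): concatenating the adaptive
# average- and max-pooled maps doubles the channel count, so C input channels
# become 2C output channels at the requested output_size.
pool = AdaptiveCatAvgMaxPool2d(output_size=1)
y = pool(torch.rand(4, 4, 4, 4))
assert y.shape == (4, 8, 1, 1)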
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
import torchvision.transforms.functional as F
import torch.nn.functional as F
import torch.nn.parallel
from torch import optim as optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_adaptive_max_pool2d_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + 16 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 16 * x2), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (2 + 16 * x2), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (3 + 16 * x2), xmask, eviction_policy='evict_last'
)
tmp7 = tl.load(in_ptr0 + (4 + 16 * x2), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (5 + 16 * x2), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (6 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr0 + (7 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (8 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr0 + (9 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr0 + (10 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (11 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (12 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr0 + (13 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr0 + (14 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp29 = tl.load(in_ptr0 + (15 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tmp22 = triton_helpers.maximum(tmp21, tmp20)
tmp24 = triton_helpers.maximum(tmp23, tmp22)
tmp26 = triton_helpers.maximum(tmp25, tmp24)
tmp28 = triton_helpers.maximum(tmp27, tmp26)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tl.store(out_ptr0 + (x0 + 8 * x1), tmp30, xmask)
@triton.jit
def triton_per_fused_mean_1(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.
constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = xindex // 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.store(out_ptr1 + (x2 + 8 * x3), tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf3 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 1, 1), torch.float32)
buf0 = reinterpret_tensor(buf3, (4, 4, 1, 1), (8, 1, 1, 1), 4)
get_raw_stream(0)
triton_poi_fused_adaptive_max_pool2d_0[grid(16)](arg0_1, buf0, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf2 = reinterpret_tensor(buf3, (4, 4, 1, 1), (8, 1, 1, 1), 0)
triton_per_fused_mean_1[grid(16)](arg0_1, buf2, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del arg0_1
return buf3,
def adaptive_catavgmax_pool2d(x, output_size=1):
x_avg = F.adaptive_avg_pool2d(x, output_size)
x_max = F.adaptive_max_pool2d(x, output_size)
return torch.cat((x_avg, x_max), 1)
class AdaptiveCatAvgMaxPool2dNew(nn.Module):
def __init__(self, output_size=1):
super(AdaptiveCatAvgMaxPool2dNew, self).__init__()
self.output_size = output_size
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
DifferentSC/pytorch-image-models
|
AdaptiveCatAvgMaxPool2d
| false
| 11,611
|
[
"Apache-2.0"
] | 0
|
ccfb5751abc70d80add4f197464190c4a2637c6c
|
https://github.com/DifferentSC/pytorch-image-models/tree/ccfb5751abc70d80add4f197464190c4a2637c6c
|
GlobalAttention
|
import torch
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional as F
def sequence_mask(lengths, max_len=None):
"""
Creates a boolean mask from sequence lengths.
"""
batch_size = lengths.numel()
max_len = max_len or lengths.max()
return torch.arange(0, max_len).type_as(lengths).repeat(batch_size, 1).lt(
lengths.unsqueeze(1))
class GlobalAttention(nn.Module):
"""
Global attention takes a matrix and a query vector. It
then computes a parameterized convex combination of the matrix
based on the input query.
Constructs a unit mapping a query `q` of size `dim`
and a source matrix `H` of size `n x dim`, to an output
of size `dim`.
.. mermaid::
graph BT
A[Query]
subgraph RNN
C[H 1]
D[H 2]
E[H N]
end
F[Attn]
G[Output]
A --> F
C --> F
D --> F
E --> F
C -.-> G
D -.-> G
E -.-> G
F --> G
All models compute the output as
:math:`c = \sum_{j=1}^{SeqLength} a_j H_j` where
:math:`a_j` is the softmax of a score function.
They then apply a projection layer to [q, c].
However they
differ on how they compute the attention score.
* Luong Attention (dot, general):
* dot: :math:`score(H_j,q) = H_j^T q`
* general: :math:`score(H_j, q) = H_j^T W_a q`
* Bahdanau Attention (mlp):
* :math:`score(H_j, q) = v_a^T tanh(W_a q + U_a h_j)`
Args:
dim (int): dimensionality of query and key
coverage (bool): use coverage term
attn_type (str): type of attention to use, options [dot,general,mlp]
"""
def __init__(self, dim, attn_type='dot'):
super(GlobalAttention, self).__init__()
self.dim = dim
assert attn_type in ['dot', 'general', 'mlp'
], 'Please select a valid attention type.'
self.attn_type = attn_type
if self.attn_type == 'general':
self.linear_in = nn.Linear(dim, dim, bias=False)
elif self.attn_type == 'mlp':
self.linear_context = nn.Linear(dim, dim, bias=False)
self.linear_query = nn.Linear(dim, dim, bias=True)
self.v = nn.Linear(dim, 1, bias=False)
out_bias = self.attn_type == 'mlp'
self.linear_out = nn.Linear(dim * 2, dim, bias=out_bias)
def score(self, h_t, h_s):
"""
Args:
h_t (`FloatTensor`): sequence of queries `[batch x tgt_len x dim]`
h_s (`FloatTensor`): sequence of sources `[batch x src_len x dim]`
Returns:
:obj:`FloatTensor`:
raw attention scores (unnormalized) for each src index
`[batch x tgt_len x src_len]`
"""
src_batch, src_len, _src_dim = h_s.size()
tgt_batch, tgt_len, tgt_dim = h_t.size()
if self.attn_type in ['general', 'dot']:
if self.attn_type == 'general':
h_t_ = h_t.view(tgt_batch * tgt_len, tgt_dim)
h_t_ = self.linear_in(h_t_)
h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim)
h_s_ = h_s.transpose(1, 2)
return torch.bmm(h_t, h_s_)
else:
dim = self.dim
wq = self.linear_query(h_t.view(-1, dim))
wq = wq.view(tgt_batch, tgt_len, 1, dim)
wq = wq.expand(tgt_batch, tgt_len, src_len, dim)
uh = self.linear_context(h_s.contiguous().view(-1, dim))
uh = uh.view(src_batch, 1, src_len, dim)
uh = uh.expand(src_batch, tgt_len, src_len, dim)
wquh = torch.tanh(wq + uh)
return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len)
def forward(self, source, memory_bank, memory_lengths=None,
memory_masks=None):
"""
Args:
source (`FloatTensor`): query vectors `[batch x tgt_len x dim]`
memory_bank (`FloatTensor`): source vectors `[batch x src_len x dim]`
memory_lengths (`LongTensor`): the source context lengths `[batch]`
coverage (`FloatTensor`): None (not supported yet)
Returns:
(`FloatTensor`, `FloatTensor`):
* Computed vector `[tgt_len x batch x dim]`
* Attention distributions for each query
`[tgt_len x batch x src_len]`
"""
if source.dim() == 2:
one_step = True
source = source.unsqueeze(1)
else:
one_step = False
batch, source_l, dim = memory_bank.size()
batch_, target_l, dim_ = source.size()
align = self.score(source, memory_bank)
if memory_masks is not None:
memory_masks = memory_masks.transpose(0, 1)
memory_masks = memory_masks.transpose(1, 2)
align.masked_fill_(1 - memory_masks.byte(), -float('inf'))
if memory_lengths is not None:
mask = sequence_mask(memory_lengths, max_len=align.size(-1))
mask = mask.unsqueeze(1)
align.masked_fill_(1 - mask, -float('inf'))
align_vectors = F.softmax(align.view(batch * target_l, source_l), -1)
align_vectors = align_vectors.view(batch, target_l, source_l)
c = torch.bmm(align_vectors, memory_bank)
concat_c = torch.cat([c, source], 2).view(batch * target_l, dim * 2)
attn_h = self.linear_out(concat_c).view(batch, target_l, dim)
if self.attn_type in ['general', 'dot']:
attn_h = torch.tanh(attn_h)
if one_step:
attn_h = attn_h.squeeze(1)
align_vectors = align_vectors.squeeze(1)
else:
attn_h = attn_h.transpose(0, 1).contiguous()
align_vectors = align_vectors.transpose(0, 1).contiguous()
return attn_h, align_vectors
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
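# Hedged usage sketch (added for illustration) for the default 'dot' attention:
# with (batch, len, dim) inputs of (4, 4, 4), the outputs come back transposed
# to (tgt_len, batch, dim) and (tgt_len, batch, src_len), and each attention
# row is a convex combination (its softmax weights sum to one).
attn = GlobalAttention(dim=4, attn_type='dot')
memory_bank = torch.rand(4, 4, 4)  # (batch, src_len, dim)
query = torch.rand(4, 4, 4)        # (batch, tgt_len, dim)
attn_h, align = attn(query, memory_bank)
assert attn_h.shape == (4, 4, 4)
assert align.shape == (4, 4, 4)
assert torch.allclose(align.sum(-1), torch.ones(4, 4))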
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.distributed
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(out_ptr0 + x3, tmp1, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(primals_1, reinterpret_tensor(primals_2, (4, 4,
4), (16, 1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0)
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1),
0), primals_2, out=buf3)
del primals_2
buf4 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
triton_poi_fused_cat_2[grid(128)](buf3, primals_1, buf4, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf5 = reinterpret_tensor(buf3, (16, 4), (4, 1), 0)
del buf3
extern_kernels.mm(reinterpret_tensor(buf4, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf5)
del primals_3
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_3[grid(64)](buf5, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(64)](buf2, buf7, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf2
return buf6, buf7, reinterpret_tensor(buf4, (16, 8), (8, 1), 0), buf5
def sequence_mask(lengths, max_len=None):
"""
Creates a boolean mask from sequence lengths.
"""
batch_size = lengths.numel()
max_len = max_len or lengths.max()
return torch.arange(0, max_len).type_as(lengths).repeat(batch_size, 1).lt(
lengths.unsqueeze(1))
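# Hedged example (not part of the original repo): a tiny check of what
# sequence_mask produces. Position j of row i is True iff j < lengths[i].
def _sequence_mask_example():
    lengths = torch.tensor([1, 3])
    mask = sequence_mask(lengths, max_len=3)
    assert mask.tolist() == [[True, False, False], [True, True, True]]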
class GlobalAttentionNew(nn.Module):
"""
Global attention takes a matrix and a query vector. It
then computes a parameterized convex combination of the matrix
based on the input query.
Constructs a unit mapping a query `q` of size `dim`
and a source matrix `H` of size `n x dim`, to an output
of size `dim`.
.. mermaid::
graph BT
A[Query]
subgraph RNN
C[H 1]
D[H 2]
E[H N]
end
F[Attn]
G[Output]
A --> F
C --> F
D --> F
E --> F
C -.-> G
D -.-> G
E -.-> G
F --> G
All models compute the output as
:math:`c = sum_{j=1}^{SeqLength} a_j H_j` where
:math:`a_j` is the softmax of a score function.
    They then apply a projection layer to [q, c].
    However, they differ in how they compute the attention score.
* Luong Attention (dot, general):
* dot: :math:`score(H_j,q) = H_j^T q`
* general: :math:`score(H_j, q) = H_j^T W_a q`
* Bahdanau Attention (mlp):
* :math:`score(H_j, q) = v_a^T tanh(W_a q + U_a h_j)`
Args:
dim (int): dimensionality of query and key
coverage (bool): use coverage term
attn_type (str): type of attention to use, options [dot,general,mlp]
"""
def __init__(self, dim, attn_type='dot'):
super(GlobalAttentionNew, self).__init__()
self.dim = dim
assert attn_type in ['dot', 'general', 'mlp'
], 'Please select a valid attention type.'
self.attn_type = attn_type
if self.attn_type == 'general':
self.linear_in = nn.Linear(dim, dim, bias=False)
elif self.attn_type == 'mlp':
self.linear_context = nn.Linear(dim, dim, bias=False)
self.linear_query = nn.Linear(dim, dim, bias=True)
self.v = nn.Linear(dim, 1, bias=False)
out_bias = self.attn_type == 'mlp'
self.linear_out = nn.Linear(dim * 2, dim, bias=out_bias)
def score(self, h_t, h_s):
"""
Args:
h_t (`FloatTensor`): sequence of queries `[batch x tgt_len x dim]`
h_s (`FloatTensor`): sequence of sources `[batch x src_len x dim]`
Returns:
:obj:`FloatTensor`:
raw attention scores (unnormalized) for each src index
`[batch x tgt_len x src_len]`
"""
src_batch, src_len, _src_dim = h_s.size()
tgt_batch, tgt_len, tgt_dim = h_t.size()
if self.attn_type in ['general', 'dot']:
if self.attn_type == 'general':
h_t_ = h_t.view(tgt_batch * tgt_len, tgt_dim)
h_t_ = self.linear_in(h_t_)
h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim)
h_s_ = h_s.transpose(1, 2)
return torch.bmm(h_t, h_s_)
else:
dim = self.dim
wq = self.linear_query(h_t.view(-1, dim))
wq = wq.view(tgt_batch, tgt_len, 1, dim)
wq = wq.expand(tgt_batch, tgt_len, src_len, dim)
uh = self.linear_context(h_s.contiguous().view(-1, dim))
uh = uh.view(src_batch, 1, src_len, dim)
uh = uh.expand(src_batch, tgt_len, src_len, dim)
wquh = torch.tanh(wq + uh)
return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len)
def forward(self, input_0, input_1):
primals_3 = self.linear_out.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0], output[1]
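# Hedged sketch (not from the original repo): the dot-product attention that the
# class docstring describes, written out in plain PyTorch. `q` is the query
# batch [batch x tgt_len x dim], `H` the source memory [batch x src_len x dim],
# and `linear_out` a hypothetical nn.Linear(2 * dim, dim) standing in for the
# projection whose weight `call` consumes above; the fused graph additionally
# permutes its outputs.
def _dot_attention_reference(q, H, linear_out):
    scores = torch.bmm(q, H.transpose(1, 2))             # score(H_j, q) = H_j^T q
    align = torch.softmax(scores, dim=-1)                 # a_j
    c = torch.bmm(align, H)                               # c = sum_j a_j H_j
    # concat order [c, q] follows the fused cat kernel above
    return torch.tanh(linear_out(torch.cat([c, q], 2))), align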
|
Katarina11/PreSumm
|
GlobalAttention
| false
| 11,612
|
[
"MIT"
] | 0
|
616e72f038d512e9e9112af375d66a0b2e3db6cd
|
https://github.com/Katarina11/PreSumm/tree/616e72f038d512e9e9112af375d66a0b2e3db6cd
|
UnbalancedLoss
|
import torch
import torch.nn as nn
import torch.utils.data
class UnbalancedLoss(nn.Module):
NUM_LABELS = 2
def __init__(self):
super().__init__()
self.crit = nn.BCEWithLogitsLoss()
def forward(self, logits, label):
return self.crit(logits, label)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
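# Hedged sketch (not part of the original repo): the fused Triton kernel below
# evaluates BCE-with-logits in its numerically stable form,
#   (1 - y) * x - (min(x, 0) - log1p(exp(-|x|))),
# which equals max(x, 0) - x * y + log(1 + exp(-|x|)), then averages it.
def _stable_bce_reference(x, y):
    per_elem = (1 - y) * x - (torch.clamp(x, max=0) - torch.log1p(torch.exp(-x.abs())))
    return per_elem.mean()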
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_0[grid(1)](buf1,
arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class UnbalancedLossNew(nn.Module):
NUM_LABELS = 2
def __init__(self):
super().__init__()
self.crit = nn.BCEWithLogitsLoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Kwonyoung-Ryu/DeepGlobalRegistration
|
UnbalancedLoss
| false
| 11,613
|
[
"MIT"
] | 0
|
0045118d96182047f4c09c4c4fe2a1b2b527cc5f
|
https://github.com/Kwonyoung-Ryu/DeepGlobalRegistration/tree/0045118d96182047f4c09c4c4fe2a1b2b527cc5f
|
Network
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.utils.data
class Network(nn.Module):
def __init__(self):
super(Network, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 2)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 3, 32, 32])]
def get_init_inputs():
return [[], {}]
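# Hedged note (not in the original repo): why fc1 expects 16 * 5 * 5 features.
# Shape trace for a 32x32 input, matching the asserts inside `call` below:
#   conv1 (5x5, no padding): 32 -> 28, pool/2 -> 14
#   conv2 (5x5, no padding): 14 -> 10, pool/2 -> 5   => 16 * 5 * 5 = 400
def _trace_network_shapes():
    net = Network()
    x = torch.rand(1, 3, 32, 32)
    assert net.conv1(x).shape[-2:] == (28, 28)
    assert net(x).shape == (1, 2)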
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 18816
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 784 % 6
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4704
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x3 = xindex // 14
x2 = xindex // 1176
x4 = xindex % 1176
tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x3), xmask, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x3), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x3), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4 + 1184 * x2), tmp6, xmask)
tl.store(out_ptr1 + (x4 + 1280 * x2), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 100 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x1), xmask, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x1), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x1), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x2, tmp15, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 480
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 120
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 336
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 84
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (6,), (1,))
assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1))
assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (120, 400), (400, 1))
assert_size_stride(primals_7, (120,), (1,))
assert_size_stride(primals_8, (84, 120), (120, 1))
assert_size_stride(primals_9, (84,), (1,))
assert_size_stride(primals_10, (2, 84), (84, 1))
assert_size_stride(primals_11, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(18816)](buf1, primals_2,
18816, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch
.float32)
buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(4704)](buf1, buf2,
buf3, 4704, XBLOCK=256, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(6400)](buf5, primals_5,
6400, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8)
buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32
)
triton_poi_fused_max_pool2d_with_indices_3[grid(1600)](buf5, buf6,
buf7, 1600, XBLOCK=256, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (4, 400), (400, 1), 0),
reinterpret_tensor(primals_6, (400, 120), (1, 400), 0), out=buf8)
buf9 = buf8
del buf8
triton_poi_fused_relu_4[grid(480)](buf9, primals_7, 480, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((4, 84), (84, 1), torch.float32)
extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (120, 84), (1,
120), 0), out=buf10)
buf11 = buf10
del buf10
triton_poi_fused_relu_5[grid(336)](buf11, primals_9, 336, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_9
buf12 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_11, buf11, reinterpret_tensor(
primals_10, (84, 2), (1, 84), 0), alpha=1, beta=1, out=buf12)
del primals_11
return (buf12, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5,
buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf9, buf11,
primals_10, primals_8, primals_6)
class NetworkNew(nn.Module):
def __init__(self):
super(NetworkNew, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 2)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_10 = self.fc3.weight
primals_11 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
Karansutradhar/Convolution-Neural-Network-Objection-Recognition-Dogs-Cats
|
Network
| false
| 11,614
|
[
"MIT"
] | 0
|
85dfab2e8758a5cf49368938b03720f197a06b18
|
https://github.com/Karansutradhar/Convolution-Neural-Network-Objection-Recognition-Dogs-Cats/tree/85dfab2e8758a5cf49368938b03720f197a06b18
|
ConformerFeedForward
|
import torch
from torch import nn
import torch.utils.data
import torch.optim
class Swish(nn.Module):
"""
Swish activation function introduced in 'https://arxiv.org/abs/1710.05941'
"""
def forward(self, x):
return x * torch.sigmoid(x)
class ConformerFeedForward(nn.Module):
"""
    Feed-forward module of the Conformer model.
"""
def __init__(self, d_model, d_ff, dropout, activation=Swish()):
super(ConformerFeedForward, self).__init__()
self.linear1 = nn.Linear(d_model, d_ff)
self.activation = activation
self.dropout = nn.Dropout(p=dropout)
self.linear2 = nn.Linear(d_ff, d_model)
def forward(self, x):
x = self.linear1(x)
x = self.activation(x)
x = self.dropout(x)
x = self.linear2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_ff': 4, 'dropout': 0.5}]
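# Hedged usage sketch (not from the original repo): the module is
# linear2(dropout(swish(linear1(x)))), where Swish(x) = x * sigmoid(x) is the
# elementwise product the fused Triton kernel below computes between the two
# addmm calls. eval() makes dropout a no-op for the check.
def _conformer_ff_example():
    ff = ConformerFeedForward(d_model=4, d_ff=4, dropout=0.5).eval()
    x = torch.rand(4, 4, 4, 4)
    assert ff(x).shape == x.shape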
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.utils.data
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0[grid(256)](buf0, buf1, 256, XBLOCK=
128, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4
class Swish(nn.Module):
"""
Swish activation function introduced in 'https://arxiv.org/abs/1710.05941'
"""
def forward(self, x):
return x * torch.sigmoid(x)
class ConformerFeedForwardNew(nn.Module):
"""
    Feed-forward module of the Conformer model.
"""
def __init__(self, d_model, d_ff, dropout, activation=Swish()):
super(ConformerFeedForwardNew, self).__init__()
self.linear1 = nn.Linear(d_model, d_ff)
self.activation = activation
self.dropout = nn.Dropout(p=dropout)
self.linear2 = nn.Linear(d_ff, d_model)
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
JINHXu/NeMo
|
ConformerFeedForward
| false
| 11,615
|
[
"Apache-2.0"
] | 0
|
835db62e39919436824ce022fd3b3f6bac301cd6
|
https://github.com/JINHXu/NeMo/tree/835db62e39919436824ce022fd3b3f6bac301cd6
|
AngleSimpleLinear
|
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter
import torch.utils.data
class AngleSimpleLinear(nn.Module):
"""Computes cos of angles between input vectors and weights vectors"""
def __init__(self, in_features, out_features):
super(AngleSimpleLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(in_features, out_features))
self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-05).mul_(100000.0)
def forward(self, x):
cos_theta = F.normalize(x, dim=1).mm(F.normalize(self.weight, dim=0))
return cos_theta.clamp(-1, 1)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
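# Hedged sketch (not part of the original repo): the layer returns the cosine of
# the angle between each input row and each weight column, i.e. the dot product
# of L2-normalised vectors clamped to [-1, 1].
def _angle_linear_example():
    layer = AngleSimpleLinear(in_features=4, out_features=4)
    x = torch.rand(2, 4)
    manual = F.normalize(x, dim=1) @ F.normalize(layer.weight, dim=0)
    assert torch.allclose(layer(x), manual.clamp(-1, 1))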
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.nn import Parameter
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_clamp_ge_le_logical_and_2(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = -1.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 1.0
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tmp5 = tmp0 >= tmp1
tmp6 = tmp0 <= tmp3
tmp7 = tmp5 & tmp6
tl.store(out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr1 + x0, tmp7, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_div_1[grid(16)](primals_2, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, buf1, out=buf2)
buf3 = buf1
del buf1
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_clamp_ge_le_logical_and_2[grid(16)](buf2, buf3,
buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf2
return buf3, primals_2, buf4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0)
class AngleSimpleLinearNew(nn.Module):
"""Computes cos of angles between input vectors and weights vectors"""
def __init__(self, in_features, out_features):
super(AngleSimpleLinearNew, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(in_features, out_features))
self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-05).mul_(100000.0)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
KhurramPirov/Twins-recognition
|
AngleSimpleLinear
| false
| 11,616
|
[
"MIT"
] | 0
|
f99ba1128afb3674a49db6a4b19afd5108c3fdf9
|
https://github.com/KhurramPirov/Twins-recognition/tree/f99ba1128afb3674a49db6a4b19afd5108c3fdf9
|
ScalarMix
|
import torch
import torch.nn as nn
class ScalarMix(nn.Module):
"""
Computes a parameterised scalar mixture of :math:`N` tensors, :math:`mixture = \\gamma * \\sum_{k}(s_k * tensor_k)`
where :math:`s = \\mathrm{softmax}(w)`, with :math:`w` and :math:`\\gamma` scalar parameters.
Args:
n_layers (int):
The number of layers to be mixed, i.e., :math:`N`.
dropout (float):
The dropout ratio of the layer weights.
If dropout > 0, then for each scalar weight, adjust its softmax weight mass to 0
with the dropout probability (i.e., setting the unnormalized weight to -inf).
This effectively redistributes the dropped probability mass to all other weights.
Default: 0.
"""
def __init__(self, n_layers, dropout=0):
super().__init__()
self.n_layers = n_layers
self.weights = nn.Parameter(torch.zeros(n_layers))
self.gamma = nn.Parameter(torch.tensor([1.0]))
self.dropout = nn.Dropout(dropout)
def __repr__(self):
s = f'n_layers={self.n_layers}'
if self.dropout.p > 0:
s += f', dropout={self.dropout.p}'
return f'{self.__class__.__name__}({s})'
def forward(self, tensors):
"""
Args:
tensors (list[~torch.Tensor]):
:math:`N` tensors to be mixed.
Returns:
The mixture of :math:`N` tensors.
"""
normed_weights = self.dropout(self.weights.softmax(-1))
weighted_sum = sum(w * h for w, h in zip(normed_weights, tensors))
return self.gamma * weighted_sum
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_layers': 1}]
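# Hedged sketch (not from the original repo): with n_layers=1 the softmax weight
# is exactly 1, so the mixture collapses to gamma * tensor, which is what the
# fused Triton kernel in the optimised code below evaluates.
def _scalar_mix_example():
    mix = ScalarMix(n_layers=1)
    h = torch.rand(4, 4, 4)
    assert torch.allclose(mix([h]), mix.gamma * h)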
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp7 = tl.load(in_ptr2 + x0, xmask)
tmp4 = tmp3 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp5 / tmp5
tmp8 = tmp6 * tmp7
tmp9 = 0.0
tmp10 = tmp8 + tmp9
tmp11 = tmp1 * tmp10
tl.store(out_ptr0 + x0, tmp11, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(64)](primals_3, primals_1,
primals_2, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
return buf0, primals_1, primals_3, reinterpret_tensor(primals_2, (4, 4,
4), (16, 4, 1), 0)
class ScalarMixNew(nn.Module):
"""
Computes a parameterised scalar mixture of :math:`N` tensors, :math:`mixture = \\gamma * \\sum_{k}(s_k * tensor_k)`
where :math:`s = \\mathrm{softmax}(w)`, with :math:`w` and :math:`\\gamma` scalar parameters.
Args:
n_layers (int):
The number of layers to be mixed, i.e., :math:`N`.
dropout (float):
The dropout ratio of the layer weights.
If dropout > 0, then for each scalar weight, adjust its softmax weight mass to 0
with the dropout probability (i.e., setting the unnormalized weight to -inf).
This effectively redistributes the dropped probability mass to all other weights.
Default: 0.
"""
def __init__(self, n_layers, dropout=0):
super().__init__()
self.n_layers = n_layers
self.weights = nn.Parameter(torch.zeros(n_layers))
self.gamma = nn.Parameter(torch.tensor([1.0]))
self.dropout = nn.Dropout(dropout)
def __repr__(self):
s = f'n_layers={self.n_layers}'
if self.dropout.p > 0:
s += f', dropout={self.dropout.p}'
return f'{self.__class__.__name__}({s})'
def forward(self, input_0):
primals_1 = self.weights
primals_3 = self.gamma
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
KoichiYasuoka/SuPar
|
ScalarMix
| false
| 11,617
|
[
"MIT"
] | 0
|
9bcf10fb946cae75b6a311d4cd19bec5bb1a9487
|
https://github.com/KoichiYasuoka/SuPar/tree/9bcf10fb946cae75b6a311d4cd19bec5bb1a9487
|
ConvGLU
|
import torch
from torch import nn
import torch.utils.data
import torch.optim
def str2act(txt):
"""Translates text to neural network activation"""
return {'sigmoid': nn.Sigmoid(), 'relu': nn.ReLU(), 'none': nn.
Sequential(), 'lrelu': nn.LeakyReLU(0.2), 'selu': nn.SELU()}[txt.
lower()]
class ConvGLU(nn.Module):
"""
A convGlu operation, used by the Degli paper's model.
"""
def __init__(self, in_ch, out_ch, kernel_size=(7, 7), padding=None,
batchnorm=False, act='sigmoid', stride=None):
super().__init__()
if not padding:
padding = kernel_size[0] // 2, kernel_size[1] // 2
if stride is None:
self.conv = nn.Conv2d(in_ch, out_ch * 2, kernel_size, padding=
padding)
else:
self.conv = nn.Conv2d(in_ch, out_ch * 2, kernel_size, padding=
padding, stride=stride)
self.weight = self.conv.weight
self.bias = self.conv.bias
if batchnorm:
self.conv = nn.Sequential(self.conv, nn.BatchNorm2d(out_ch * 2))
self.sigmoid = str2act(act)
def forward(self, x):
x = self.conv(x)
ch = x.shape[1]
x = x[:, :ch // 2, ...] * self.sigmoid(x[:, ch // 2:, ...])
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_ch': 4, 'out_ch': 4}]
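# Hedged sketch (not part of the original repo): the convolution emits
# 2 * out_ch channels, and the first half is gated by the sigmoid of the second
# half, which is the split that triton_poi_fused_mul_sigmoid_1 reproduces below.
def _conv_glu_example():
    glu = ConvGLU(in_ch=4, out_ch=4)
    y = glu(torch.rand(2, 4, 8, 8))
    assert y.shape == (2, 4, 8, 8)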
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.utils.data
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 8
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = xindex // 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1), xmask)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (8, 4, 7, 7), (196, 49, 7, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 4, 4), (128, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(512)](buf1, primals_2, 512,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_1[grid(256)](buf1, buf2, 256, XBLOCK=
256, num_warps=4, num_stages=1)
return buf2, primals_1, primals_3, buf1
def str2act(txt):
"""Translates text to neural network activation"""
return {'sigmoid': nn.Sigmoid(), 'relu': nn.ReLU(), 'none': nn.
Sequential(), 'lrelu': nn.LeakyReLU(0.2), 'selu': nn.SELU()}[txt.
lower()]
class ConvGLUNew(nn.Module):
"""
A convGlu operation, used by the Degli paper's model.
"""
def __init__(self, in_ch, out_ch, kernel_size=(7, 7), padding=None,
batchnorm=False, act='sigmoid', stride=None):
super().__init__()
if not padding:
padding = kernel_size[0] // 2, kernel_size[1] // 2
if stride is None:
self.conv = nn.Conv2d(in_ch, out_ch * 2, kernel_size, padding=
padding)
else:
self.conv = nn.Conv2d(in_ch, out_ch * 2, kernel_size, padding=
padding, stride=stride)
self.weight = self.conv.weight
self.bias = self.conv.bias
if batchnorm:
self.conv = nn.Sequential(self.conv, nn.BatchNorm2d(out_ch * 2))
self.sigmoid = str2act(act)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
JINHXu/NeMo
|
ConvGLU
| false
| 11,618
|
[
"Apache-2.0"
] | 0
|
835db62e39919436824ce022fd3b3f6bac301cd6
|
https://github.com/JINHXu/NeMo/tree/835db62e39919436824ce022fd3b3f6bac301cd6
|
AdaptiveAvgMaxPool2d
|
import torch
import torch.nn as nn
import torch.utils.data
import torchvision.transforms.functional as F
import torch.nn.functional as F
import torch.nn.parallel
from torch import optim as optim
def adaptive_avgmax_pool2d(x, output_size=1):
x_avg = F.adaptive_avg_pool2d(x, output_size)
x_max = F.adaptive_max_pool2d(x, output_size)
return 0.5 * (x_avg + x_max)
class AdaptiveAvgMaxPool2d(nn.Module):
def __init__(self, output_size=1):
super(AdaptiveAvgMaxPool2d, self).__init__()
self.output_size = output_size
def forward(self, x):
return adaptive_avgmax_pool2d(x, self.output_size)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
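# Hedged sketch (not from the original repo): the pooled value is the average of
# adaptive average pooling and adaptive max pooling, which the single fused
# Triton reduction below computes per (batch, channel) slice.
def _avgmax_pool_example():
    x = torch.rand(4, 4, 4, 4)
    ref = 0.5 * (x.mean(dim=(2, 3), keepdim=True) + x.amax(dim=(2, 3), keepdim=True))
    assert torch.allclose(AdaptiveAvgMaxPool2d()(x), ref)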
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
import torchvision.transforms.functional as F
import torch.nn.functional as F
import torch.nn.parallel
from torch import optim as optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_adaptive_max_pool2d_add_mean_mul_0(in_out_ptr0,
in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp5 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp8 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp10 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp22 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp24 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp26 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp30 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp32 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp34 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp7 = triton_helpers.maximum(tmp6, tmp5)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tmp19 = triton_helpers.maximum(tmp18, tmp17)
tmp21 = triton_helpers.maximum(tmp20, tmp19)
tmp23 = triton_helpers.maximum(tmp22, tmp21)
tmp25 = triton_helpers.maximum(tmp24, tmp23)
tmp27 = triton_helpers.maximum(tmp26, tmp25)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tmp31 = triton_helpers.maximum(tmp30, tmp29)
tmp33 = triton_helpers.maximum(tmp32, tmp31)
tmp35 = triton_helpers.maximum(tmp34, tmp33)
tmp36 = 16.0
tmp37 = tmp4 / tmp36
tmp38 = tmp37 + tmp35
tmp39 = 0.5
tmp40 = tmp38 * tmp39
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp40, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf2 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_adaptive_max_pool2d_add_mean_mul_0[grid(16)](buf2,
arg0_1, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf2,
def adaptive_avgmax_pool2d(x, output_size=1):
x_avg = F.adaptive_avg_pool2d(x, output_size)
x_max = F.adaptive_max_pool2d(x, output_size)
return 0.5 * (x_avg + x_max)
class AdaptiveAvgMaxPool2dNew(nn.Module):
def __init__(self, output_size=1):
super(AdaptiveAvgMaxPool2dNew, self).__init__()
self.output_size = output_size
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
DifferentSC/pytorch-image-models
|
AdaptiveAvgMaxPool2d
| false
| 11,619
|
[
"Apache-2.0"
] | 0
|
ccfb5751abc70d80add4f197464190c4a2637c6c
|
https://github.com/DifferentSC/pytorch-image-models/tree/ccfb5751abc70d80add4f197464190c4a2637c6c
|
DuelingQNetwork
|
import torch
import torch.nn.functional as F
import torch.nn as nn
class DuelingQNetwork(nn.Module):
"""Dueling Q-network (https://arxiv.org/abs/1511.06581)"""
def __init__(self, state_size, action_size, hidsize1=128, hidsize2=128):
super(DuelingQNetwork, self).__init__()
self.fc1_val = nn.Linear(state_size, hidsize1)
self.fc2_val = nn.Linear(hidsize1, hidsize2)
self.fc4_val = nn.Linear(hidsize2, 1)
self.fc1_adv = nn.Linear(state_size, hidsize1)
self.fc2_adv = nn.Linear(hidsize1, hidsize2)
self.fc4_adv = nn.Linear(hidsize2, action_size)
def forward(self, x):
val = F.relu(self.fc1_val(x))
val = F.relu(self.fc2_val(val))
val = self.fc4_val(val)
adv = F.relu(self.fc1_adv(x))
adv = F.relu(self.fc2_adv(adv))
adv = self.fc4_adv(adv)
return val + adv - adv.mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_size': 4, 'action_size': 4}]
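# Hedged sketch (not from the original repo): the dueling head combines a scalar
# state value and per-action advantages as Q = V + A - mean(A). Note that this
# implementation subtracts the mean over *all* advantage entries (not per
# sample), which is also what the fused add/mean/sub kernel below does.
def _dueling_example():
    net = DuelingQNetwork(state_size=4, action_size=4)
    q = net(torch.rand(4, 4))
    assert q.shape == (4, 4)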
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_per_fused_add_mean_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
r2 = rindex // 4
tmp0 = tl.load(in_ptr0 + r0, None)
tmp4 = tl.load(in_ptr1 + r2, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + 0)
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0))
tmp7 = tmp4 + tmp6
tmp8 = tmp7 + tmp0
tmp9 = 256.0
tmp10 = tmp3 / tmp9
tmp11 = tmp8 - tmp10
tl.store(out_ptr1 + tl.broadcast_to(r0, [RBLOCK]), tmp11, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (128, 4), (4, 1))
assert_size_stride(primals_2, (128,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (128, 128), (128, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (1, 128), (128, 1))
assert_size_stride(primals_7, (1,), (1,))
assert_size_stride(primals_8, (128, 4), (4, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (128, 128), (128, 1))
assert_size_stride(primals_11, (128,), (1,))
assert_size_stride(primals_12, (4, 128), (128, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf0
buf15 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1,
primals_2, buf15, 8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 128), (128, 1), 0),
reinterpret_tensor(primals_4, (128, 128), (1, 128), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf2
buf14 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf3,
primals_5, buf14, 8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 128), (128, 1), 0),
reinterpret_tensor(primals_6, (128, 1), (1, 128), 0), out=buf4)
buf5 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 128), (1, 4), 0), out=buf5)
del primals_8
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf5
buf13 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf6,
primals_9, buf13, 8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf7 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf6, (64, 128), (128, 1), 0),
reinterpret_tensor(primals_10, (128, 128), (1, 128), 0), out=buf7)
buf8 = reinterpret_tensor(buf7, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf7
buf12 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf8,
primals_11, buf12, 8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_13, reinterpret_tensor(buf8, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_12, (128, 4), (1, 128),
0), alpha=1, beta=1, out=buf9)
del primals_13
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_per_fused_add_mean_sub_1[grid(1)](buf9, buf4, primals_7,
buf11, 1, 256, num_warps=2, num_stages=1)
del buf4
del buf9
del primals_7
return buf11, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 128), (128, 1), 0
), reinterpret_tensor(buf3, (64, 128), (128, 1), 0
), reinterpret_tensor(buf6, (64, 128), (128, 1), 0
), reinterpret_tensor(buf8, (64, 128), (128, 1), 0
), primals_12, buf12, primals_10, buf13, primals_6, buf14, primals_4, buf15
class DuelingQNetworkNew(nn.Module):
"""Dueling Q-network (https://arxiv.org/abs/1511.06581)"""
def __init__(self, state_size, action_size, hidsize1=128, hidsize2=128):
super(DuelingQNetworkNew, self).__init__()
self.fc1_val = nn.Linear(state_size, hidsize1)
self.fc2_val = nn.Linear(hidsize1, hidsize2)
self.fc4_val = nn.Linear(hidsize2, 1)
self.fc1_adv = nn.Linear(state_size, hidsize1)
self.fc2_adv = nn.Linear(hidsize1, hidsize2)
self.fc4_adv = nn.Linear(hidsize2, action_size)
def forward(self, input_0):
primals_1 = self.fc1_val.weight
primals_2 = self.fc1_val.bias
primals_4 = self.fc2_val.weight
primals_5 = self.fc2_val.bias
primals_6 = self.fc4_val.weight
primals_7 = self.fc4_val.bias
primals_8 = self.fc1_adv.weight
primals_9 = self.fc1_adv.bias
primals_10 = self.fc2_adv.weight
primals_11 = self.fc2_adv.bias
primals_12 = self.fc4_adv.weight
primals_13 = self.fc4_adv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
|
KantiCodes/flatland-rl
|
DuelingQNetwork
| false
| 11,620
|
[
"MIT"
] | 0
|
fcc10e83d2548470ebaa5540b967db0940eb30dd
|
https://github.com/KantiCodes/flatland-rl/tree/fcc10e83d2548470ebaa5540b967db0940eb30dd
|
AsymmetricLossOptimized
|
import torch
import torch.nn as nn
class AsymmetricLossOptimized(nn.Module):
""" Notice - optimized version, minimizes memory allocation and gpu uploading,
favors inplace operations
https://github.com/Alibaba-MIIL/ASL/blob/main/src/loss_functions/losses.py
"""
def __init__(self, gamma_neg=4, gamma_pos=1, clip=0.05, eps=1e-08,
disable_torch_grad_focal_loss=False):
super(AsymmetricLossOptimized, self).__init__()
self.gamma_neg = gamma_neg
self.gamma_pos = gamma_pos
self.clip = clip
self.disable_torch_grad_focal_loss = disable_torch_grad_focal_loss
self.eps = eps
(self.targets) = (self.anti_targets) = (self.xs_pos) = (self.xs_neg
) = (self.asymmetric_w) = (self.loss) = None
def forward(self, x, y):
""""
Parameters
----------
x: input logits
y: targets (multi-label binarized vector)
"""
self.targets = y
self.anti_targets = 1 - y
self.xs_pos = torch.sigmoid(x)
self.xs_neg = 1.0 - self.xs_pos
if self.clip is not None and self.clip > 0:
self.xs_neg.add_(self.clip).clamp_(max=1)
self.loss = self.targets * torch.log(self.xs_pos.clamp(min=self.eps))
self.loss.add_(self.anti_targets * torch.log(self.xs_neg.clamp(min=
self.eps)))
if self.gamma_neg > 0 or self.gamma_pos > 0:
if self.disable_torch_grad_focal_loss:
torch._C.set_grad_enabled(False)
self.xs_pos = self.xs_pos * self.targets
self.xs_neg = self.xs_neg * self.anti_targets
self.asymmetric_w = torch.pow(1 - self.xs_pos - self.xs_neg,
self.gamma_pos * self.targets + self.gamma_neg * self.
anti_targets)
if self.disable_torch_grad_focal_loss:
torch._C.set_grad_enabled(True)
self.loss *= self.asymmetric_w
return -self.loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
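# Hedged sketch (not part of the original repo): the loss is returned per
# element (no reduction), matching the elementwise buffer the fused Triton
# kernel below writes out.
def _asl_example():
    loss_fn = AsymmetricLossOptimized()
    x = torch.randn(2, 5)                        # logits
    y = torch.randint(0, 2, (2, 5)).float()      # multi-label targets
    assert loss_fn(x, y).shape == (2, 5)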
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_clamp_log_mul_neg_pow_rsub_sigmoid_sub_0(in_ptr0,
in_ptr1, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr1 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tl.sigmoid(tmp3)
tmp5 = tmp4 * tmp0
tmp6 = tmp1 - tmp4
tmp7 = 0.05
tmp8 = tmp6 + tmp7
tmp9 = triton_helpers.minimum(tmp8, tmp1)
tmp10 = tmp9 * tmp2
tmp11 = tmp1 - tmp5
tmp12 = tmp11 - tmp10
tmp13 = tmp0 * tmp1
tmp14 = 4.0
tmp15 = tmp2 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = libdevice.pow(tmp12, tmp16)
tmp18 = 1e-08
tmp19 = triton_helpers.maximum(tmp4, tmp18)
tmp20 = tl_math.log(tmp19)
tmp21 = tmp0 * tmp20
tmp22 = triton_helpers.maximum(tmp9, tmp18)
tmp23 = tl_math.log(tmp22)
tmp24 = tmp2 * tmp23
tmp25 = tmp21 + tmp24
tmp26 = tmp25 * tmp17
tmp27 = -tmp26
tl.store(out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr1 + x0, tmp5, xmask)
tl.store(out_ptr2 + x0, tmp10, xmask)
tl.store(out_ptr3 + x0, tmp17, xmask)
tl.store(out_ptr4 + x0, tmp26, xmask)
tl.store(out_ptr5 + x0, tmp27, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_clamp_log_mul_neg_pow_rsub_sigmoid_sub_0[grid(256)
](arg0_1, arg1_1, buf0, buf1, buf2, buf3, buf4, buf5, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf5, buf3, buf4, buf2, buf1, buf0
class AsymmetricLossOptimizedNew(nn.Module):
""" Notice - optimized version, minimizes memory allocation and gpu uploading,
favors inplace operations
https://github.com/Alibaba-MIIL/ASL/blob/main/src/loss_functions/losses.py
"""
def __init__(self, gamma_neg=4, gamma_pos=1, clip=0.05, eps=1e-08,
disable_torch_grad_focal_loss=False):
super(AsymmetricLossOptimizedNew, self).__init__()
self.gamma_neg = gamma_neg
self.gamma_pos = gamma_pos
self.clip = clip
self.disable_torch_grad_focal_loss = disable_torch_grad_focal_loss
self.eps = eps
(self.targets) = (self.anti_targets) = (self.xs_pos) = (self.xs_neg
) = (self.asymmetric_w) = (self.loss) = None
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
LanXiangExcavator/python-classifier-2021
|
AsymmetricLossOptimized
| false
| 11,621
|
[
"BSD-2-Clause"
] | 0
|
851079e76db8e5070132d1120dba941967e1245b
|
https://github.com/LanXiangExcavator/python-classifier-2021/tree/851079e76db8e5070132d1120dba941967e1245b
|
DiceLoss
|
import torch
import torch.nn as nn
class DiceLoss(nn.Module):
def __init__(self):
super(DiceLoss, self).__init__()
def forward(self, input, target, logits=True):
if logits:
input = nn.Sigmoid()(input)
N = target.size(0)
smooth = 1
input_flat = input.view(N, -1)
target_flat = target.view(N, -1)
intersection = input_flat * target_flat
loss = 2 * (intersection.sum(1) + smooth) / (input_flat.sum(1) +
target_flat.sum(1) + smooth)
loss = 1 - loss.sum() / N
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
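# Hedged sketch (not part of the original repo): for a near-perfect prediction
# the soft Dice ratio is ~1, so the loss is ~0 (slightly negative here: the
# smooth term sits inside the factor of 2 in the numerator but is added only
# once in the denominator).
def _dice_example():
    target = (torch.rand(2, 4, 4, 4) > 0.5).float()
    logits = (target * 2 - 1) * 20.0   # large +/- logits, so sigmoid(logits) ~ target
    assert DiceLoss()(logits, target).item() < 0.01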
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp14 = tl.where(xmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tl.store(out_ptr0 + x0, tmp7, xmask)
tl.store(out_ptr1 + x0, tmp11, xmask)
tl.store(out_ptr2 + x0, tmp15, xmask)
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp5 = tl.load(in_ptr1 + r0, None)
tmp6 = tl.load(in_ptr2 + r0, None)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp7 + tmp1
tmp9 = tmp4 / tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.sum(tmp10, 1)[:, None]
tmp13 = 0.25
tmp14 = tmp12 * tmp13
tmp15 = tmp1 - tmp14
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp15, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_mul_sum_0[grid(4)](arg0_1, arg1_1, buf0, buf1,
buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_per_fused_add_div_mul_rsub_sum_1[grid(1)](buf4, buf0, buf1,
buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
del buf2
return buf4,
class DiceLossNew(nn.Module):
def __init__(self):
super(DiceLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
LanXiangExcavator/python-classifier-2021
|
DiceLoss
| false
| 11,622
|
[
"BSD-2-Clause"
] | 0
|
851079e76db8e5070132d1120dba941967e1245b
|
https://github.com/LanXiangExcavator/python-classifier-2021/tree/851079e76db8e5070132d1120dba941967e1245b
|
MultiLayerPerceptron
|
import torch
import torch.utils.data
import torch.optim
class MultiLayerPerceptron(torch.nn.Module):
"""
A simple MLP that can either be used independently or put on top
of pretrained models (such as BERT) and act as a classifier.
Args:
hidden_size (int): the size of each layer
num_classes (int): number of output classes
num_layers (int): number of layers
activation (str): type of activations for layers in between
log_softmax (bool): whether to add a log_softmax layer before output
"""
def __init__(self, hidden_size: 'int', num_classes: 'int', num_layers:
'int'=2, activation: 'str'='relu', log_softmax: 'bool'=True):
super().__init__()
self.layers = 0
for _ in range(num_layers - 1):
layer = torch.nn.Linear(hidden_size, hidden_size)
setattr(self, f'layer{self.layers}', layer)
setattr(self, f'layer{self.layers + 1}', getattr(torch, activation)
)
self.layers += 2
layer = torch.nn.Linear(hidden_size, num_classes)
setattr(self, f'layer{self.layers}', layer)
self.layers += 1
self.log_softmax = log_softmax
@property
def last_linear_layer(self):
return getattr(self, f'layer{self.layers - 1}')
def forward(self, hidden_states):
output_states = hidden_states[:]
for i in range(self.layers):
output_states = getattr(self, f'layer{i}')(output_states)
if self.log_softmax:
output_states = torch.log_softmax(output_states, dim=-1)
return output_states
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4, 'num_classes': 4}]
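# Illustrative usage sketch (not part of the original module): sizes follow
# get_init_inputs(); with the defaults (num_layers=2, activation='relu',
# log_softmax=True) the output holds log-probabilities over the last dimension.
if __name__ == '__main__':
    _mlp = MultiLayerPerceptron(hidden_size=4, num_classes=4)
    _out = _mlp(torch.rand(4, 4, 4, 4))
    print(_out.shape, _out.exp().sum(-1)[0, 0, 0])  # probabilities sum to ~1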
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
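# Note on the generated kernels below: the first fuses the bias add with the
# ReLU (keeping a <=0 mask for the backward pass); the last two implement
# log_softmax in two passes, first subtracting the row max and then subtracting
# the log of the row-wise exp-sum.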
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_3, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused__log_softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del buf3
return buf4, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf4, primals_4, buf5
class MultiLayerPerceptronNew(torch.nn.Module):
"""
A simple MLP that can either be used independently or put on top
of pretrained models (such as BERT) and act as a classifier.
Args:
hidden_size (int): the size of each layer
num_classes (int): number of output classes
num_layers (int): number of layers
activation (str): type of activations for layers in between
log_softmax (bool): whether to add a log_softmax layer before output
"""
def __init__(self, hidden_size: 'int', num_classes: 'int', num_layers:
'int'=2, activation: 'str'='relu', log_softmax: 'bool'=True):
super().__init__()
self.layers = 0
for _ in range(num_layers - 1):
layer = torch.nn.Linear(hidden_size, hidden_size)
setattr(self, f'layer{self.layers}', layer)
setattr(self, f'layer{self.layers + 1}', getattr(torch, activation)
)
self.layers += 2
layer = torch.nn.Linear(hidden_size, num_classes)
setattr(self, f'layer{self.layers}', layer)
self.layers += 1
self.log_softmax = log_softmax
@property
def last_linear_layer(self):
return getattr(self, f'layer{self.layers - 1}')
def forward(self, input_0):
primals_2 = self.layer0.weight
primals_3 = self.layer0.bias
primals_4 = self.layer2.weight
primals_5 = self.layer2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
JINHXu/NeMo
|
MultiLayerPerceptron
| false
| 11,623
|
[
"Apache-2.0"
] | 0
|
835db62e39919436824ce022fd3b3f6bac301cd6
|
https://github.com/JINHXu/NeMo/tree/835db62e39919436824ce022fd3b3f6bac301cd6
|
TransformerDecoderLayer
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.utils.data
import torch.distributions
class TransformerDecoderLayer(nn.Module):
"""Decoder layer block. Follows an implementation in fairseq with args.decoder_normalize_before=True,
    i.e. the order of operations differs from that in the original paper.
"""
def __init__(self, num_heads, embed_dim, hidden_size, dropout=0.0,
attention_dropout=0.0, activation_dropout=0.0):
super().__init__()
self.embed_dim = embed_dim
self.self_attn = torch.nn.MultiheadAttention(embed_dim=self.
embed_dim, num_heads=num_heads, dropout=attention_dropout)
self.dropout = dropout
self.activation_dropout = activation_dropout
self.self_attn_layer_norm = torch.nn.LayerNorm(self.embed_dim)
self.encoder_attn = torch.nn.MultiheadAttention(embed_dim=self.
embed_dim, num_heads=num_heads, dropout=attention_dropout)
self.encoder_attn_layer_norm = torch.nn.LayerNorm(self.embed_dim)
self.fc1 = torch.nn.Linear(self.embed_dim, hidden_size)
self.fc2 = torch.nn.Linear(hidden_size, self.embed_dim)
self.layer_norm = torch.nn.LayerNorm(self.embed_dim)
self.init_parameters()
def init_parameters(self):
nn.init.xavier_uniform_(self.fc1.weight)
nn.init.constant_(self.fc1.bias, 0.0)
nn.init.xavier_uniform_(self.fc2.weight)
nn.init.constant_(self.fc2.bias, 0.0)
def forward(self, x, encoder_out, key_mask=None, attn_mask=None):
residual = x
x = self.self_attn_layer_norm(x)
x, attn = self.self_attn(query=x, key=x, value=x, key_padding_mask=
key_mask, attn_mask=attn_mask)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
residual = x
x = self.encoder_attn_layer_norm(x)
x, attn = self.encoder_attn(query=x, key=encoder_out, value=encoder_out
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
residual = x
x = self.layer_norm(x)
x = F.relu(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
return x, attn
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'num_heads': 4, 'embed_dim': 4, 'hidden_size': 4}]
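# Illustrative usage sketch (not part of the original module): get_inputs()
# feeds unbatched (seq_len, embed_dim) tensors, which recent torch versions
# accept in nn.MultiheadAttention; the layer returns the transformed sequence
# together with the encoder-attention weights.
if __name__ == '__main__':
    _layer = TransformerDecoderLayer(num_heads=4, embed_dim=4, hidden_size=4)
    _x, _attn = _layer(torch.rand(4, 4), torch.rand(4, 4))
    print(_x.shape, _attn.shape)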
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import torch.distributions
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
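# Note on the generated kernels below: they cover the LayerNorm statistics and
# affine transform, the bias add plus query scaling (a factor of 1.0 here,
# since head_dim = embed_dim // num_heads = 1), the two-pass softmax,
# transposed copies of the attention output, the residual additions, the ReLU
# in the feed-forward block, and the head-averaged attention weights returned
# as the second output.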
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_mean_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_out_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (12, 4), (4, 1))
assert_size_stride(primals_12, (12,), (1,))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4,), (1,))
assert_size_stride(primals_15, (4,), (1,))
assert_size_stride(primals_16, (4,), (1,))
assert_size_stride(primals_17, (4, 4), (4, 1))
assert_size_stride(primals_18, (4,), (1,))
assert_size_stride(primals_19, (4, 4), (4, 1))
assert_size_stride(primals_20, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(4)](primals_1, buf0, buf1,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(16)](primals_1, buf0,
buf1, primals_2, primals_3, buf2, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del primals_2
del primals_3
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4
), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 4),
buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=
1, beta=1, out=buf4)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 8),
buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=
1, beta=1, out=buf5)
buf6 = reinterpret_tensor(buf3, (4, 4, 1), (1, 4, 16), 0)
del buf3
triton_poi_fused_mul_2[grid(16)](buf6, primals_5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf6, reinterpret_tensor(buf4, (4, 1, 4), (1, 1,
4), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(64)](buf7, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf9 = buf7
del buf7
triton_poi_fused__softmax_4[grid(64)](buf8, buf9, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf9, reinterpret_tensor(buf5, (4, 4, 1), (1, 4,
1), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_clone_5[grid(4, 4)](buf10, buf11, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf12 = reinterpret_tensor(buf10, (4, 4), (4, 1), 0)
del buf10
extern_kernels.addmm(primals_7, reinterpret_tensor(buf11, (4, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf12)
del primals_7
buf13 = buf1
del buf1
buf14 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_6[grid(4)](primals_1, buf12,
buf13, buf14, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_7[grid(16)](primals_1, buf12,
buf13, buf14, primals_8, primals_9, buf15, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_9
buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf15, reinterpret_tensor(primals_11, (4, 4), (1,
4), 0), out=buf16)
buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_12, (4,), (1,), 4),
primals_10, reinterpret_tensor(primals_11, (4, 4), (1, 4), 16),
alpha=1, beta=1, out=buf17)
buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_12, (4,), (1,), 8),
primals_10, reinterpret_tensor(primals_11, (4, 4), (1, 4), 32),
alpha=1, beta=1, out=buf18)
buf19 = reinterpret_tensor(buf16, (4, 4, 1), (1, 4, 16), 0)
del buf16
triton_poi_fused_mul_2[grid(16)](buf19, primals_12, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_12
buf20 = buf8
del buf8
extern_kernels.bmm(buf19, reinterpret_tensor(buf17, (4, 1, 4), (1,
1, 4), 0), out=buf20)
buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(64)](buf20, buf21, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf22 = buf20
del buf20
triton_poi_fused__softmax_4[grid(64)](buf21, buf22, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf21
buf23 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf22, reinterpret_tensor(buf18, (4, 4, 1), (1,
4, 1), 0), out=buf23)
buf24 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_clone_5[grid(4, 4)](buf23, buf24, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf25 = reinterpret_tensor(buf23, (4, 4), (4, 1), 0)
del buf23
extern_kernels.mm(reinterpret_tensor(buf24, (4, 4), (4, 1), 0),
reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf25)
buf26 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_mean_8[grid(16)](buf22, buf26, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf27 = buf25
del buf25
triton_poi_fused_add_9[grid(16)](buf27, primals_1, buf12,
primals_14, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_14
buf28 = buf14
del buf14
buf29 = buf13
del buf13
triton_poi_fused_native_layer_norm_0[grid(4)](buf27, buf28, buf29,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf30 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(16)](buf27, buf28, buf29,
primals_15, primals_16, buf30, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del buf28
del buf29
del primals_16
buf31 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf30, reinterpret_tensor(primals_17, (4, 4), (1,
4), 0), out=buf31)
buf32 = buf31
del buf31
triton_poi_fused_relu_10[grid(16)](buf32, primals_18, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_18
buf33 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf32, reinterpret_tensor(primals_19, (4, 4), (1,
4), 0), out=buf33)
buf34 = buf33
del buf33
triton_poi_fused_add_11[grid(16)](buf34, buf27, primals_20, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_20
return (buf34, reinterpret_tensor(buf26, (4, 4), (4, 1), 0), primals_1,
primals_8, primals_15, buf2, buf9, reinterpret_tensor(buf11, (4, 4),
(4, 1), 0), buf12, buf15, primals_10, buf22, reinterpret_tensor(
buf24, (4, 4), (4, 1), 0), buf27, buf30, buf32, primals_19,
primals_17, primals_13, reinterpret_tensor(buf18, (4, 1, 4), (1, 1,
4), 0), reinterpret_tensor(buf19, (4, 1, 4), (1, 1, 4), 0),
reinterpret_tensor(buf17, (4, 4, 1), (1, 4, 1), 0),
reinterpret_tensor(primals_11, (4, 4), (4, 1), 0), primals_6,
reinterpret_tensor(buf5, (4, 1, 4), (1, 1, 4), 0),
reinterpret_tensor(buf6, (4, 1, 4), (1, 1, 4), 0),
reinterpret_tensor(buf4, (4, 4, 1), (1, 4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 32),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 16),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 0))
class TransformerDecoderLayerNew(nn.Module):
"""Decoder layer block. Follows an implementation in fairseq with args.decoder_normalize_before=True,
    i.e. the order of operations differs from that in the original paper.
"""
def __init__(self, num_heads, embed_dim, hidden_size, dropout=0.0,
attention_dropout=0.0, activation_dropout=0.0):
super().__init__()
self.embed_dim = embed_dim
self.self_attn = torch.nn.MultiheadAttention(embed_dim=self.
embed_dim, num_heads=num_heads, dropout=attention_dropout)
self.dropout = dropout
self.activation_dropout = activation_dropout
self.self_attn_layer_norm = torch.nn.LayerNorm(self.embed_dim)
self.encoder_attn = torch.nn.MultiheadAttention(embed_dim=self.
embed_dim, num_heads=num_heads, dropout=attention_dropout)
self.encoder_attn_layer_norm = torch.nn.LayerNorm(self.embed_dim)
self.fc1 = torch.nn.Linear(self.embed_dim, hidden_size)
self.fc2 = torch.nn.Linear(hidden_size, self.embed_dim)
self.layer_norm = torch.nn.LayerNorm(self.embed_dim)
self.init_parameters()
def init_parameters(self):
nn.init.xavier_uniform_(self.fc1.weight)
nn.init.constant_(self.fc1.bias, 0.0)
nn.init.xavier_uniform_(self.fc2.weight)
nn.init.constant_(self.fc2.bias, 0.0)
def forward(self, input_0, input_1):
primals_4 = self.self_attn.in_proj_weight
primals_5 = self.self_attn.in_proj_bias
primals_1 = self.self_attn.out_proj.weight
primals_2 = self.self_attn.out_proj.bias
primals_3 = self.self_attn_layer_norm.weight
primals_7 = self.self_attn_layer_norm.bias
primals_11 = self.encoder_attn.in_proj_weight
primals_12 = self.encoder_attn.in_proj_bias
primals_6 = self.encoder_attn.out_proj.weight
primals_8 = self.encoder_attn.out_proj.bias
primals_9 = self.encoder_attn_layer_norm.weight
primals_14 = self.encoder_attn_layer_norm.bias
primals_10 = self.fc1.weight
primals_15 = self.fc1.bias
primals_13 = self.fc2.weight
primals_16 = self.fc2.bias
primals_18 = self.layer_norm.weight
primals_20 = self.layer_norm.bias
primals_17 = input_0
primals_19 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20])
return output[0], output[1]
|
IA3005/NLP_ens
|
TransformerDecoderLayer
| false
| 11,624
|
[
"MIT"
] | 0
|
794ebbff46d5e6d5476f29b577b40bbb52991246
|
https://github.com/IA3005/NLP_ens/tree/794ebbff46d5e6d5476f29b577b40bbb52991246
|
FixedSubnetConv
|
import math
import torch
import torch.multiprocessing
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.functional as F
class FixedSubnetConv(nn.Conv2d):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.scores = nn.Parameter(torch.Tensor(self.weight.size()))
nn.init.kaiming_uniform_(self.scores, a=math.sqrt(5))
def set_prune_rate(self, prune_rate):
self.prune_rate = prune_rate
        None  # no-op
def set_subnet(self):
output = self.clamped_scores().clone()
_, idx = self.clamped_scores().flatten().abs().sort()
p = int(self.prune_rate * self.clamped_scores().numel())
flat_oup = output.flatten()
flat_oup[idx[:p]] = 0
flat_oup[idx[p:]] = 1
self.scores = torch.nn.Parameter(output)
self.scores.requires_grad = False
def clamped_scores(self):
return self.scores.abs()
def get_subnet(self):
return self.weight * self.scores
def forward(self, x):
w = self.get_subnet()
x = F.conv2d(x, w, self.bias, self.stride, self.padding, self.
dilation, self.groups)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
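# Illustrative usage sketch (not part of the original module): the convolution
# uses weight * scores as its effective kernel; calling set_prune_rate() and
# then set_subnet() binarizes the scores so the product becomes a pruned weight.
if __name__ == '__main__':
    _conv = FixedSubnetConv(in_channels=4, out_channels=4, kernel_size=4)
    _conv.set_prune_rate(0.5)
    _conv.set_subnet()
    print(_conv(torch.rand(4, 4, 4, 4)).shape)  # torch.Size([4, 4, 1, 1])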
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.multiprocessing
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
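# Note on the generated kernels below: the first forms the effective weight as
# the elementwise product weight * scores; the convolution itself is dispatched
# to extern_kernels.convolution and the second kernel adds the bias in place.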
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](primals_1, primals_2, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(primals_4, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(16)](buf2, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
return buf2, primals_1, primals_2, primals_4, buf0
class FixedSubnetConvNew(nn.Conv2d):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.scores = nn.Parameter(torch.Tensor(self.weight.size()))
nn.init.kaiming_uniform_(self.scores, a=math.sqrt(5))
def set_prune_rate(self, prune_rate):
self.prune_rate = prune_rate
        None  # no-op
def set_subnet(self):
output = self.clamped_scores().clone()
_, idx = self.clamped_scores().flatten().abs().sort()
p = int(self.prune_rate * self.clamped_scores().numel())
flat_oup = output.flatten()
flat_oup[idx[:p]] = 0
flat_oup[idx[p:]] = 1
self.scores = torch.nn.Parameter(output)
self.scores.requires_grad = False
def clamped_scores(self):
return self.scores.abs()
def get_subnet(self):
return self.weight * self.scores
def forward(self, input_0):
primals_1 = self.weight
primals_3 = self.bias
primals_2 = self.scores
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
Lasitha-93/CRTIDL_2021
|
FixedSubnetConv
| false
| 11,625
|
[
"Apache-2.0"
] | 0
|
d6bc6fbe08161c3574511623230a7aa4895f65e1
|
https://github.com/Lasitha-93/CRTIDL_2021/tree/d6bc6fbe08161c3574511623230a7aa4895f65e1
|
AttentionBlock
|
import math
import torch
from torch.nn import functional as F
from torch import nn
import torch.utils.data
import torch.optim
def convert_pad_shape(pad_shape):
"""
Used to get arguments for F.pad
"""
l = pad_shape[::-1]
pad_shape = [item for sublist in l for item in sublist]
return pad_shape
class AttentionBlock(nn.Module):
def __init__(self, channels, out_channels, n_heads, window_size=None,
heads_share=True, p_dropout=0.0, block_length=None, proximal_bias=
False, proximal_init=False):
super().__init__()
assert channels % n_heads == 0
self.channels = channels
self.out_channels = out_channels
self.n_heads = n_heads
self.window_size = window_size
self.heads_share = heads_share
self.block_length = block_length
self.proximal_bias = proximal_bias
self.p_dropout = p_dropout
self.attn = None
self.k_channels = channels // n_heads
self.conv_q = nn.Conv1d(channels, channels, 1)
self.conv_k = nn.Conv1d(channels, channels, 1)
self.conv_v = nn.Conv1d(channels, channels, 1)
if window_size is not None:
n_heads_rel = 1 if heads_share else n_heads
rel_stddev = self.k_channels ** -0.5
self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel,
window_size * 2 + 1, self.k_channels) * rel_stddev)
self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel,
window_size * 2 + 1, self.k_channels) * rel_stddev)
self.conv_o = nn.Conv1d(channels, out_channels, 1)
self.drop = nn.Dropout(p_dropout)
nn.init.xavier_uniform_(self.conv_q.weight)
nn.init.xavier_uniform_(self.conv_k.weight)
if proximal_init:
self.conv_k.weight.data.copy_(self.conv_q.weight.data)
self.conv_k.bias.data.copy_(self.conv_q.bias.data)
nn.init.xavier_uniform_(self.conv_v.weight)
def forward(self, x, c, attn_mask=None):
q = self.conv_q(x)
k = self.conv_k(c)
v = self.conv_v(c)
x, self.attn = self.attention(q, k, v, mask=attn_mask)
x = self.conv_o(x)
return x
def attention(self, query, key, value, mask=None):
b, d, t_s, t_t = key.size(0), key.size(1), key.size(2), query.size(2)
query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(
2, 3)
key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(
2, 3)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self
.k_channels)
if self.window_size is not None:
assert t_s == t_t, 'Relative attention is only available for self-attention.'
key_relative_embeddings = self._get_relative_embeddings(self.
emb_rel_k, t_s)
rel_logits = self._matmul_with_relative_keys(query,
key_relative_embeddings)
rel_logits = self._relative_position_to_absolute_position(
rel_logits)
scores_local = rel_logits / math.sqrt(self.k_channels)
scores = scores + scores_local
if self.proximal_bias:
assert t_s == t_t, 'Proximal bias is only available for self-attention.'
scores = scores + self._attention_bias_proximal(t_s)
if mask is not None:
scores = scores.masked_fill(mask == 0, -10000.0)
if self.block_length is not None:
block_mask = torch.ones_like(scores).triu(-self.block_length
).tril(self.block_length)
scores = scores * block_mask + -10000.0 * (1 - block_mask)
p_attn = F.softmax(scores, dim=-1)
p_attn = self.drop(p_attn)
output = torch.matmul(p_attn, value)
if self.window_size is not None:
relative_weights = self._absolute_position_to_relative_position(
p_attn)
value_relative_embeddings = self._get_relative_embeddings(self.
emb_rel_v, t_s)
output = output + self._matmul_with_relative_values(
relative_weights, value_relative_embeddings)
output = output.transpose(2, 3).contiguous().view(b, d, t_t)
return output, p_attn
def _matmul_with_relative_values(self, x, y):
"""
x: [b, h, l, m]
y: [h or 1, m, d]
ret: [b, h, l, d]
"""
ret = torch.matmul(x, y.unsqueeze(0))
return ret
def _matmul_with_relative_keys(self, x, y):
"""
x: [b, h, l, d]
y: [h or 1, m, d]
ret: [b, h, l, m]
"""
ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
return ret
def _get_relative_embeddings(self, relative_embeddings, length):
pad_length = max(length - (self.window_size + 1), 0)
slice_start_position = max(self.window_size + 1 - length, 0)
slice_end_position = slice_start_position + 2 * length - 1
if pad_length > 0:
padded_relative_embeddings = F.pad(relative_embeddings,
convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
else:
padded_relative_embeddings = relative_embeddings
used_relative_embeddings = padded_relative_embeddings[:,
slice_start_position:slice_end_position]
return used_relative_embeddings
def _relative_position_to_absolute_position(self, x):
"""
x: [b, h, l, 2*l-1]
ret: [b, h, l, l]
"""
batch, heads, length, _ = x.size()
x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
x_flat = x.view([batch, heads, length * 2 * length])
x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [0,
length - 1]]))
x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:,
:, :length, length - 1:]
return x_final
def _absolute_position_to_relative_position(self, x):
"""
x: [b, h, l, l]
ret: [b, h, l, 2*l-1]
"""
batch, heads, length, _ = x.size()
x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length -
1]]))
x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)])
x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [length, 0]])
)
x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
return x_final
def _attention_bias_proximal(self, length):
"""Bias for self-attention to encourage attention to close positions.
Args:
length: an integer scalar.
Returns:
a Tensor with shape [1, 1, length, length]
"""
r = torch.arange(length, dtype=torch.float32)
diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)
), 0), 0)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4, 'out_channels': 4, 'n_heads': 4}]
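# Illustrative usage sketch (not part of the original module): shapes follow
# get_inputs(), i.e. x and c are (batch, channels, time) tensors; the block
# attends from x onto c and stores the attention map in self.attn.
if __name__ == '__main__':
    _blk = AttentionBlock(channels=4, out_channels=4, n_heads=4)
    _y = _blk(torch.rand(4, 4, 4), torch.rand(4, 4, 4))
    print(_y.shape, _blk.attn.shape)  # (4, 4, 4) and (4, 4, 4, 4)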
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
from torch.nn import functional as F
from torch import nn
import torch.utils.data
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_10, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
buf1 = extern_kernels.convolution(primals_6, primals_4, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
buf2 = extern_kernels.convolution(primals_6, primals_7, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4), (16, 4, 1))
buf3 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(64)](buf3, primals_2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
buf4 = buf1
del buf1
triton_poi_fused_convolution_0[grid(64)](buf4, primals_5, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_2[grid(256)](buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf6
buf8 = buf2
del buf2
triton_poi_fused_convolution_0[grid(64)](buf8, primals_8, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_8
buf9 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = extern_kernels.convolution(reinterpret_tensor(buf9, (4, 4,
4), (16, 4, 1), 0), primals_9, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf10, (4, 4, 4), (16, 4, 1))
buf11 = buf10
del buf10
triton_poi_fused_convolution_0[grid(64)](buf11, primals_10, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_10
return (buf11, buf7, primals_1, primals_3, primals_4, primals_6,
primals_7, primals_9, buf7, reinterpret_tensor(buf9, (4, 4, 4), (16,
4, 1), 0), reinterpret_tensor(buf8, (16, 1, 4), (4, 4, 1), 0),
reinterpret_tensor(buf3, (16, 1, 4), (4, 4, 1), 0),
reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0))
def convert_pad_shape(pad_shape):
"""
Used to get arguments for F.pad
"""
l = pad_shape[::-1]
pad_shape = [item for sublist in l for item in sublist]
return pad_shape
class AttentionBlockNew(nn.Module):
def __init__(self, channels, out_channels, n_heads, window_size=None,
heads_share=True, p_dropout=0.0, block_length=None, proximal_bias=
False, proximal_init=False):
super().__init__()
assert channels % n_heads == 0
self.channels = channels
self.out_channels = out_channels
self.n_heads = n_heads
self.window_size = window_size
self.heads_share = heads_share
self.block_length = block_length
self.proximal_bias = proximal_bias
self.p_dropout = p_dropout
self.attn = None
self.k_channels = channels // n_heads
self.conv_q = nn.Conv1d(channels, channels, 1)
self.conv_k = nn.Conv1d(channels, channels, 1)
self.conv_v = nn.Conv1d(channels, channels, 1)
if window_size is not None:
n_heads_rel = 1 if heads_share else n_heads
rel_stddev = self.k_channels ** -0.5
self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel,
window_size * 2 + 1, self.k_channels) * rel_stddev)
self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel,
window_size * 2 + 1, self.k_channels) * rel_stddev)
self.conv_o = nn.Conv1d(channels, out_channels, 1)
self.drop = nn.Dropout(p_dropout)
nn.init.xavier_uniform_(self.conv_q.weight)
nn.init.xavier_uniform_(self.conv_k.weight)
if proximal_init:
self.conv_k.weight.data.copy_(self.conv_q.weight.data)
self.conv_k.bias.data.copy_(self.conv_q.bias.data)
nn.init.xavier_uniform_(self.conv_v.weight)
def attention(self, query, key, value, mask=None):
b, d, t_s, t_t = key.size(0), key.size(1), key.size(2), query.size(2)
query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(
2, 3)
key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(
2, 3)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self
.k_channels)
if self.window_size is not None:
assert t_s == t_t, 'Relative attention is only available for self-attention.'
key_relative_embeddings = self._get_relative_embeddings(self.
emb_rel_k, t_s)
rel_logits = self._matmul_with_relative_keys(query,
key_relative_embeddings)
rel_logits = self._relative_position_to_absolute_position(
rel_logits)
scores_local = rel_logits / math.sqrt(self.k_channels)
scores = scores + scores_local
if self.proximal_bias:
assert t_s == t_t, 'Proximal bias is only available for self-attention.'
scores = scores + self._attention_bias_proximal(t_s)
if mask is not None:
scores = scores.masked_fill(mask == 0, -10000.0)
if self.block_length is not None:
block_mask = torch.ones_like(scores).triu(-self.block_length
).tril(self.block_length)
scores = scores * block_mask + -10000.0 * (1 - block_mask)
p_attn = F.softmax(scores, dim=-1)
p_attn = self.drop(p_attn)
output = torch.matmul(p_attn, value)
if self.window_size is not None:
relative_weights = self._absolute_position_to_relative_position(
p_attn)
value_relative_embeddings = self._get_relative_embeddings(self.
emb_rel_v, t_s)
output = output + self._matmul_with_relative_values(
relative_weights, value_relative_embeddings)
output = output.transpose(2, 3).contiguous().view(b, d, t_t)
return output, p_attn
def _matmul_with_relative_values(self, x, y):
"""
x: [b, h, l, m]
y: [h or 1, m, d]
ret: [b, h, l, d]
"""
ret = torch.matmul(x, y.unsqueeze(0))
return ret
def _matmul_with_relative_keys(self, x, y):
"""
x: [b, h, l, d]
y: [h or 1, m, d]
ret: [b, h, l, m]
"""
ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
return ret
def _get_relative_embeddings(self, relative_embeddings, length):
pad_length = max(length - (self.window_size + 1), 0)
slice_start_position = max(self.window_size + 1 - length, 0)
slice_end_position = slice_start_position + 2 * length - 1
if pad_length > 0:
padded_relative_embeddings = F.pad(relative_embeddings,
convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
else:
padded_relative_embeddings = relative_embeddings
used_relative_embeddings = padded_relative_embeddings[:,
slice_start_position:slice_end_position]
return used_relative_embeddings
def _relative_position_to_absolute_position(self, x):
"""
x: [b, h, l, 2*l-1]
ret: [b, h, l, l]
"""
batch, heads, length, _ = x.size()
x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
x_flat = x.view([batch, heads, length * 2 * length])
x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [0,
length - 1]]))
x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:,
:, :length, length - 1:]
return x_final
def _absolute_position_to_relative_position(self, x):
"""
x: [b, h, l, l]
ret: [b, h, l, 2*l-1]
"""
batch, heads, length, _ = x.size()
x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length -
1]]))
x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)])
x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [length, 0]])
)
x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
return x_final
def _attention_bias_proximal(self, length):
"""Bias for self-attention to encourage attention to close positions.
Args:
length: an integer scalar.
Returns:
a Tensor with shape [1, 1, length, length]
"""
r = torch.arange(length, dtype=torch.float32)
diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)
), 0), 0)
def forward(self, input_0, input_1):
primals_1 = self.conv_q.weight
primals_2 = self.conv_q.bias
primals_4 = self.conv_k.weight
primals_5 = self.conv_k.bias
primals_7 = self.conv_v.weight
primals_8 = self.conv_v.bias
primals_9 = self.conv_o.weight
primals_10 = self.conv_o.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
|
JINHXu/NeMo
|
AttentionBlock
| false
| 11,626
|
[
"Apache-2.0"
] | 0
|
835db62e39919436824ce022fd3b3f6bac301cd6
|
https://github.com/JINHXu/NeMo/tree/835db62e39919436824ce022fd3b3f6bac301cd6
|
LayerNorm
|
import torch
from torch import nn
from torch.nn import LayerNorm
import torch.utils.data
import torch.optim
class LayerNorm(nn.Module):
def __init__(self, channels, eps=0.0001):
super().__init__()
self.channels = channels
self.eps = eps
self.gamma = nn.Parameter(torch.ones(channels))
self.beta = nn.Parameter(torch.zeros(channels))
def forward(self, x):
n_dims = len(x.shape)
mean = torch.mean(x, 1, keepdim=True)
variance = torch.mean((x - mean) ** 2, 1, keepdim=True)
x = (x - mean) * torch.rsqrt(variance + self.eps)
shape = [1, -1] + [1] * (n_dims - 2)
x = x * self.gamma.view(*shape) + self.beta.view(*shape)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
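# Illustrative usage sketch (not part of the original module): unlike
# torch.nn.LayerNorm, this variant normalizes over dim=1 (channels), so it can
# be applied directly to (batch, channels, ...) feature maps.
if __name__ == '__main__':
    _ln = LayerNorm(channels=4)
    _y = _ln(torch.rand(4, 4, 4, 4))
    print(_y.mean(dim=1).abs().max())  # ~0: each position is centered over channels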
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.utils.data
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
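# Note on the generated kernels below: the first subtracts the per-position
# mean over the channel dimension; the second computes rsqrt(var + eps) from
# the centered values and applies gamma and beta, reproducing LayerNorm.forward
# with normalization over dim=1.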
@triton.jit
def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_mean_mul_pow_rsqrt_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = 0.0001
tmp15 = tmp13 + tmp14
tmp16 = libdevice.rsqrt(tmp15)
tmp17 = tmp0 * tmp16
tmp19 = tmp17 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + x3, tmp21, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_sub_0[grid(256)](primals_1, buf0, 256, XBLOCK
=256, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mean_mul_pow_rsqrt_1[grid(256)](buf0,
primals_2, primals_3, buf1, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf0
del primals_2
del primals_3
return buf1, primals_1
class LayerNormNew(nn.Module):
def __init__(self, channels, eps=0.0001):
super().__init__()
self.channels = channels
self.eps = eps
self.gamma = nn.Parameter(torch.ones(channels))
self.beta = nn.Parameter(torch.zeros(channels))
def forward(self, input_0):
primals_2 = self.gamma
primals_3 = self.beta
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
JINHXu/NeMo
|
LayerNorm
| false
| 11,627
|
[
"Apache-2.0"
] | 0
|
835db62e39919436824ce022fd3b3f6bac301cd6
|
https://github.com/JINHXu/NeMo/tree/835db62e39919436824ce022fd3b3f6bac301cd6
|
FusedDownsample
|
import torch
from torch import nn
from torch.nn import functional as F
from math import sqrt
class FusedDownsample(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, padding=0):
super().__init__()
weight = torch.randn(out_channel, in_channel, kernel_size, kernel_size)
bias = torch.zeros(out_channel)
fan_in = in_channel * kernel_size * kernel_size
self.multiplier = sqrt(2 / fan_in)
self.weight = nn.Parameter(weight)
self.bias = nn.Parameter(bias)
self.pad = padding
def forward(self, input):
weight = F.pad(self.weight * self.multiplier, [1, 1, 1, 1])
weight = (weight[:, :, 1:, 1:] + weight[:, :, :-1, 1:] + weight[:,
:, 1:, :-1] + weight[:, :, :-1, :-1]) / 4
out = F.conv2d(input, weight, self.bias, stride=2, padding=self.pad)
return out
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4}]
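# Illustrative usage sketch (not part of the original module): the scaled
# weight is padded and averaged over four one-pixel shifts (an effective blur),
# then the convolution runs with stride 2, halving the spatial resolution.
if __name__ == '__main__':
    _down = FusedDownsample(in_channel=4, out_channel=4, kernel_size=4)
    print(_down(torch.rand(4, 4, 64, 64)).shape)  # torch.Size([4, 4, 30, 30])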
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from math import sqrt
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 5 % 5
x0 = xindex % 5
x2 = xindex // 25
x4 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0
)
tmp12 = 0.1767766952966369
tmp13 = tmp11 * tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp10, tmp13, tmp14)
tmp16 = -1 + x1
tmp17 = tmp16 >= tmp1
tmp18 = tmp16 < tmp3
tmp19 = tmp17 & tmp18
tmp20 = tmp19 & tmp6
tmp21 = tmp20 & tmp7
tmp22 = tl.load(in_ptr0 + (-4 + x0 + 4 * x1 + 16 * x2), tmp21 & xmask,
other=0.0)
tmp23 = tmp22 * tmp12
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp21, tmp23, tmp24)
tmp26 = tmp15 + tmp25
tmp27 = -1 + x0
tmp28 = tmp27 >= tmp1
tmp29 = tmp27 < tmp3
tmp30 = tmp8 & tmp28
tmp31 = tmp30 & tmp29
tmp32 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1 + 16 * x2), tmp31 & xmask,
other=0.0)
tmp33 = tmp32 * tmp12
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp31, tmp33, tmp34)
tmp36 = tmp26 + tmp35
tmp37 = tmp19 & tmp28
tmp38 = tmp37 & tmp29
tmp39 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp38 & xmask,
other=0.0)
tmp40 = tmp39 * tmp12
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp36 + tmp42
tmp44 = 0.25
tmp45 = tmp43 * tmp44
tl.store(in_out_ptr0 + x4, tmp45, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 14400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 900 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_div_0[grid(400)](buf1, primals_1, 400, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_1
buf2 = extern_kernels.convolution(primals_3, buf1, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 30, 30), (3600, 900, 30, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(14400)](buf3, primals_2, 14400,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
return buf3, primals_3, buf1
class FusedDownsampleNew(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, padding=0):
super().__init__()
weight = torch.randn(out_channel, in_channel, kernel_size, kernel_size)
bias = torch.zeros(out_channel)
fan_in = in_channel * kernel_size * kernel_size
self.multiplier = sqrt(2 / fan_in)
self.weight = nn.Parameter(weight)
self.bias = nn.Parameter(bias)
self.pad = padding
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
KUMartin77/AAA738_StyleGAN_pytorch
|
FusedDownsample
| false
| 11,628
|
[
"BSD-2-Clause"
] | 0
|
ed0689102c922d336f53e374e8be2ab532a84ccd
|
https://github.com/KUMartin77/AAA738_StyleGAN_pytorch/tree/ed0689102c922d336f53e374e8be2ab532a84ccd
|
Biaffine
|
import torch
import torch.nn as nn
class Biaffine(nn.Module):
"""
Biaffine layer for first-order scoring.
This function has a tensor of weights :math:`W` and bias terms if needed.
The score :math:`s(x, y)` of the vector pair :math:`(x, y)` is computed as :math:`x^T W y`,
in which :math:`x` and :math:`y` can be concatenated with bias terms.
References:
- Timothy Dozat and Christopher D. Manning. 2017.
`Deep Biaffine Attention for Neural Dependency Parsing`_.
Args:
n_in (int):
The size of the input feature.
n_out (int):
The number of output channels.
bias_x (bool):
If ``True``, adds a bias term for tensor :math:`x`. Default: ``True``.
bias_y (bool):
If ``True``, adds a bias term for tensor :math:`y`. Default: ``True``.
.. _Deep Biaffine Attention for Neural Dependency Parsing:
https://openreview.net/forum?id=Hk95PK9le
"""
def __init__(self, n_in, n_out=1, bias_x=True, bias_y=True):
super().__init__()
self.n_in = n_in
self.n_out = n_out
self.bias_x = bias_x
self.bias_y = bias_y
self.weight = nn.Parameter(torch.Tensor(n_out, n_in + bias_x, n_in +
bias_y))
self.reset_parameters()
def __repr__(self):
s = f'n_in={self.n_in}, n_out={self.n_out}'
if self.bias_x:
s += f', bias_x={self.bias_x}'
if self.bias_y:
s += f', bias_y={self.bias_y}'
return f'{self.__class__.__name__}({s})'
def reset_parameters(self):
nn.init.zeros_(self.weight)
def forward(self, x, y):
"""
Args:
x (torch.Tensor): ``[batch_size, seq_len, n_in]``.
y (torch.Tensor): ``[batch_size, seq_len, n_in]``.
Returns:
~torch.Tensor:
A scoring tensor of shape ``[batch_size, n_out, seq_len, seq_len]``.
If ``n_out=1``, the dimension for ``n_out`` will be squeezed automatically.
"""
if self.bias_x:
x = torch.cat((x, torch.ones_like(x[..., :1])), -1)
if self.bias_y:
y = torch.cat((y, torch.ones_like(y[..., :1])), -1)
s = torch.einsum('bxi,oij,byj->boxy', x, self.weight, y)
s = s.squeeze(1)
return s
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_in': 4}]
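# Hedged usage sketch (not part of the original record): checks the output shape
# documented in Biaffine.forward using the tensors from get_inputs() above.
if __name__ == '__main__':
    x, y = get_inputs()  # each [batch_size=4, seq_len=4, n_in=4]
    layer = Biaffine(n_in=4)  # n_out=1, so the n_out dimension is squeezed
    s = layer(x, y)
    # With both bias terms the einsum operates on 5-dim vectors and produces a
    # [4, 1, 4, 4] tensor before the squeeze.
    assert s.shape == (4, 4, 4)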
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 5, tl.int64)
tmp9 = 1.0
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp6, tmp9, tmp10)
tmp12 = tl.where(tmp4, tmp5, tmp11)
tl.store(out_ptr0 + x2, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (1, 5, 5), (25, 5, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(80)](primals_1, buf0, 80, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((1, 16, 5), (80, 5, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (1, 16, 5), (0, 5, 1),
0), primals_3, out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
triton_poi_fused_cat_0[grid(80)](primals_2, buf2, 80, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_2
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf2, reinterpret_tensor(buf1, (4, 5, 4), (20, 1,
5), 0), out=buf3)
del buf1
return reinterpret_tensor(buf3, (4, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf2, (4, 5, 4), (20, 1, 5), 0
), reinterpret_tensor(buf0, (1, 5, 16), (80, 1, 5), 0)
class BiaffineNew(nn.Module):
"""
Biaffine layer for first-order scoring.
This function has a tensor of weights :math:`W` and bias terms if needed.
The score :math:`s(x, y)` of the vector pair :math:`(x, y)` is computed as :math:`x^T W y`,
in which :math:`x` and :math:`y` can be concatenated with bias terms.
References:
- Timothy Dozat and Christopher D. Manning. 2017.
`Deep Biaffine Attention for Neural Dependency Parsing`_.
Args:
n_in (int):
The size of the input feature.
n_out (int):
The number of output channels.
bias_x (bool):
If ``True``, adds a bias term for tensor :math:`x`. Default: ``True``.
bias_y (bool):
If ``True``, adds a bias term for tensor :math:`y`. Default: ``True``.
.. _Deep Biaffine Attention for Neural Dependency Parsing:
https://openreview.net/forum?id=Hk95PK9le
"""
def __init__(self, n_in, n_out=1, bias_x=True, bias_y=True):
super().__init__()
self.n_in = n_in
self.n_out = n_out
self.bias_x = bias_x
self.bias_y = bias_y
self.weight = nn.Parameter(torch.Tensor(n_out, n_in + bias_x, n_in +
bias_y))
self.reset_parameters()
def __repr__(self):
s = f'n_in={self.n_in}, n_out={self.n_out}'
if self.bias_x:
s += f', bias_x={self.bias_x}'
if self.bias_y:
s += f', bias_y={self.bias_y}'
return f'{self.__class__.__name__}({s})'
def reset_parameters(self):
nn.init.zeros_(self.weight)
def forward(self, input_0, input_1):
primals_3 = self.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
|
KoichiYasuoka/SuPar
|
Biaffine
| false
| 11,629
|
[
"MIT"
] | 0
|
9bcf10fb946cae75b6a311d4cd19bec5bb1a9487
|
https://github.com/KoichiYasuoka/SuPar/tree/9bcf10fb946cae75b6a311d4cd19bec5bb1a9487
|
MulticlassDiceLoss
|
import torch
import torch.nn as nn
class DiceLoss(nn.Module):
def __init__(self):
super(DiceLoss, self).__init__()
def forward(self, input, target, logits=True):
if logits:
input = nn.Sigmoid()(input)
N = target.size(0)
smooth = 1
input_flat = input.view(N, -1)
target_flat = target.view(N, -1)
intersection = input_flat * target_flat
loss = 2 * (intersection.sum(1) + smooth) / (input_flat.sum(1) +
target_flat.sum(1) + smooth)
loss = 1 - loss.sum() / N
return loss
class MulticlassDiceLoss(nn.Module):
"""
Requires a one-hot encoded target and applies DiceLoss to each class iteratively.
Requires input.shape[:2] and target.shape[:2] to be (N, C), where N is the
batch size and C is the number of classes.
"""
def __init__(self):
super(MulticlassDiceLoss, self).__init__()
def forward(self, input, target, weights=None, logits=True):
C = target.shape[1]
dice = DiceLoss()
totalLoss = 0
for i in range(C):
diceLoss = dice(input[:, i], target[:, i], logits)
if weights is not None:
diceLoss *= weights[i]
totalLoss += diceLoss
return totalLoss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
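# Hedged usage sketch (not part of the original record): sums one DiceLoss term
# per class channel for the random tensors from get_inputs() above.
if __name__ == '__main__':
    logits, target = get_inputs()  # both [N=4, C=4, H=4, W=4]
    loss = MulticlassDiceLoss()(logits, target)
    # The accumulated per-class losses collapse to a single scalar tensor.
    assert loss.dim() == 0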
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp14 = tl.where(xmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tl.store(out_ptr0 + x0, tmp7, xmask)
tl.store(out_ptr1 + x0, tmp11, xmask)
tl.store(out_ptr2 + x0, tmp15, xmask)
@triton.jit
def triton_per_fused_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (32 + r1 + 64 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr1 + (32 + r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp14 = tl.where(xmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tl.store(out_ptr0 + x0, tmp7, xmask)
tl.store(out_ptr1 + x0, tmp11, xmask)
tl.store(out_ptr2 + x0, tmp15, xmask)
@triton.jit
def triton_per_fused_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (48 + r1 + 64 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr1 + (48 + r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp14 = tl.where(xmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tl.store(out_ptr0 + x0, tmp7, xmask)
tl.store(out_ptr1 + x0, tmp11, xmask)
tl.store(out_ptr2 + x0, tmp15, xmask)
@triton.jit
def triton_per_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16 + r1 + 64 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr1 + (16 + r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp14 = tl.where(xmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tl.store(out_ptr0 + x0, tmp7, xmask)
tl.store(out_ptr1 + x0, tmp11, xmask)
tl.store(out_ptr2 + x0, tmp15, xmask)
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_4(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9,
in_ptr10, in_ptr11, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp5 = tl.load(in_ptr1 + r0, None)
tmp6 = tl.load(in_ptr2 + r0, None)
tmp13 = tl.load(in_ptr3 + r0, None)
tmp16 = tl.load(in_ptr4 + r0, None)
tmp17 = tl.load(in_ptr5 + r0, None)
tmp24 = tl.load(in_ptr6 + r0, None)
tmp27 = tl.load(in_ptr7 + r0, None)
tmp28 = tl.load(in_ptr8 + r0, None)
tmp35 = tl.load(in_ptr9 + r0, None)
tmp38 = tl.load(in_ptr10 + r0, None)
tmp39 = tl.load(in_ptr11 + r0, None)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp7 + tmp1
tmp9 = tmp4 / tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.sum(tmp10, 1)[:, None]
tmp14 = tmp13 + tmp1
tmp15 = tmp14 * tmp3
tmp18 = tmp16 + tmp17
tmp19 = tmp18 + tmp1
tmp20 = tmp15 / tmp19
tmp21 = tl.broadcast_to(tmp20, [XBLOCK, RBLOCK])
tmp23 = tl.sum(tmp21, 1)[:, None]
tmp25 = tmp24 + tmp1
tmp26 = tmp25 * tmp3
tmp29 = tmp27 + tmp28
tmp30 = tmp29 + tmp1
tmp31 = tmp26 / tmp30
tmp32 = tl.broadcast_to(tmp31, [XBLOCK, RBLOCK])
tmp34 = tl.sum(tmp32, 1)[:, None]
tmp36 = tmp35 + tmp1
tmp37 = tmp36 * tmp3
tmp40 = tmp38 + tmp39
tmp41 = tmp40 + tmp1
tmp42 = tmp37 / tmp41
tmp43 = tl.broadcast_to(tmp42, [XBLOCK, RBLOCK])
tmp45 = tl.sum(tmp43, 1)[:, None]
tmp46 = 0.25
tmp47 = tmp12 * tmp46
tmp48 = tmp1 - tmp47
tmp49 = 0.0
tmp50 = tmp48 + tmp49
tmp51 = tmp23 * tmp46
tmp52 = tmp1 - tmp51
tmp53 = tmp50 + tmp52
tmp54 = tmp34 * tmp46
tmp55 = tmp1 - tmp54
tmp56 = tmp53 + tmp55
tmp57 = tmp45 * tmp46
tmp58 = tmp1 - tmp57
tmp59 = tmp56 + tmp58
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp59, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_mul_sum_0[grid(4)](arg1_1, arg0_1, buf0, buf1,
buf2, 4, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf8 = empty_strided_cuda((4,), (1,), torch.float32)
buf9 = empty_strided_cuda((4,), (1,), torch.float32)
buf10 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused_mul_sum_1[grid(4)](arg1_1, arg0_1, buf8, buf9,
buf10, 4, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf12 = empty_strided_cuda((4,), (1,), torch.float32)
buf13 = empty_strided_cuda((4,), (1,), torch.float32)
buf14 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused_mul_sum_2[grid(4)](arg1_1, arg0_1, buf12, buf13,
buf14, 4, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf4 = empty_strided_cuda((4,), (1,), torch.float32)
buf5 = empty_strided_cuda((4,), (1,), torch.float32)
buf6 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused_mul_sum_3[grid(4)](arg1_1, arg0_1, buf4, buf5,
buf6, 4, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf11 = empty_strided_cuda((), (), torch.float32)
buf16 = buf11
del buf11
triton_per_fused_add_div_mul_rsub_sum_4[grid(1)](buf16, buf0, buf1,
buf2, buf4, buf5, buf6, buf8, buf9, buf10, buf12, buf13, buf14,
1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
del buf10
del buf12
del buf13
del buf14
del buf2
del buf4
del buf5
del buf6
del buf8
del buf9
return buf16,
class DiceLoss(nn.Module):
def __init__(self):
super(DiceLoss, self).__init__()
def forward(self, input, target, logits=True):
if logits:
input = nn.Sigmoid()(input)
N = target.size(0)
smooth = 1
input_flat = input.view(N, -1)
target_flat = target.view(N, -1)
intersection = input_flat * target_flat
loss = 2 * (intersection.sum(1) + smooth) / (input_flat.sum(1) +
target_flat.sum(1) + smooth)
loss = 1 - loss.sum() / N
return loss
class MulticlassDiceLossNew(nn.Module):
"""
Requires a one-hot encoded target and applies DiceLoss to each class iteratively.
Requires input.shape[:2] and target.shape[:2] to be (N, C), where N is the
batch size and C is the number of classes.
"""
def __init__(self):
super(MulticlassDiceLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
LanXiangExcavator/python-classifier-2021
|
MulticlassDiceLoss
| false
| 11,630
|
[
"BSD-2-Clause"
] | 0
|
851079e76db8e5070132d1120dba941967e1245b
|
https://github.com/LanXiangExcavator/python-classifier-2021/tree/851079e76db8e5070132d1120dba941967e1245b
|
Triaffine
|
import torch
import torch.nn as nn
class Triaffine(nn.Module):
"""
Triaffine layer for second-order scoring.
This function has a tensor of weights :math:`W` and bias terms if needed.
The score :math:`s(x, y, z)` of the vector triple :math:`(x, y, z)` is computed as :math:`x^T z^T W y`.
Usually, :math:`x` and :math:`y` can be concatenated with bias terms.
References:
- Yu Zhang, Zhenghua Li and Min Zhang. 2020.
`Efficient Second-Order TreeCRF for Neural Dependency Parsing`_.
- Xinyu Wang, Jingxian Huang, and Kewei Tu. 2019.
`Second-Order Semantic Dependency Parsing with End-to-End Neural Networks`_.
Args:
n_in (int):
The size of the input feature.
bias_x (bool):
If ``True``, adds a bias term for tensor :math:`x`. Default: ``False``.
bias_y (bool):
If ``True``, adds a bias term for tensor :math:`y`. Default: ``False``.
.. _Efficient Second-Order TreeCRF for Neural Dependency Parsing:
https://www.aclweb.org/anthology/2020.acl-main.302/
.. _Second-Order Semantic Dependency Parsing with End-to-End Neural Networks:
https://www.aclweb.org/anthology/P19-1454/
"""
def __init__(self, n_in, bias_x=False, bias_y=False):
super().__init__()
self.n_in = n_in
self.bias_x = bias_x
self.bias_y = bias_y
self.weight = nn.Parameter(torch.Tensor(n_in + bias_x, n_in, n_in +
bias_y))
self.reset_parameters()
def __repr__(self):
s = f'n_in={self.n_in}'
if self.bias_x:
s += f', bias_x={self.bias_x}'
if self.bias_y:
s += f', bias_y={self.bias_y}'
return f'{self.__class__.__name__}({s})'
def reset_parameters(self):
nn.init.zeros_(self.weight)
def forward(self, x, y, z):
"""
Args:
x (torch.Tensor): ``[batch_size, seq_len, n_in]``.
y (torch.Tensor): ``[batch_size, seq_len, n_in]``.
z (torch.Tensor): ``[batch_size, seq_len, n_in]``.
Returns:
~torch.Tensor:
A scoring tensor of shape ``[batch_size, seq_len, seq_len, seq_len]``.
"""
if self.bias_x:
x = torch.cat((x, torch.ones_like(x[..., :1])), -1)
if self.bias_y:
y = torch.cat((y, torch.ones_like(y[..., :1])), -1)
w = torch.einsum('bzk,ikj->bzij', z, self.weight)
s = torch.einsum('bxi,bzij,byj->bzxy', x, w, y)
return s
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'n_in': 4}]
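# Hedged usage sketch (not part of the original record): checks the scoring shape
# documented in Triaffine.forward using the tensors from get_inputs() above.
if __name__ == '__main__':
    x, y, z = get_inputs()  # each [batch_size=4, seq_len=4, n_in=4]
    layer = Triaffine(n_in=4)  # bias_x=bias_y=False, so no ones are appended
    s = layer(x, y, z)
    # 'bzk,ikj->bzij' followed by 'bxi,bzij,byj->bzxy' yields one score per
    # (z, x, y) triple of sequence positions.
    assert s.shape == (4, 4, 4, 4)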
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 4
x3 = xindex // 4
y0 = yindex % 4
y1 = yindex // 4
x5 = xindex
y4 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x3 + 16 * x2 + 64 * y1), xmask & ymask)
tl.store(out_ptr0 + (x5 + 16 * y4), tmp0, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1, 1), (16, 4, 1, 1, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((1, 16, 16), (256, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_2, (1, 16, 4), (64, 4,
1), 0), reinterpret_tensor(buf0, (1, 4, 16), (0, 16, 1), 0),
out=buf1)
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 1, 4, 1), (64, 16, 4, 4, 1, 1),
torch.float32)
triton_poi_fused_clone_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4, 16), (64, 16, 1), 0)
del buf1
extern_kernels.bmm(primals_4, reinterpret_tensor(buf2, (4, 4, 16),
(64, 16, 1), 0), out=buf3)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4, 1, 1), (64, 16, 4, 1,
1, 1), 0)
del buf2
triton_poi_fused_clone_2[grid(16, 16)](buf3, buf4, 16, 16, XBLOCK=
16, YBLOCK=16, num_warps=4, num_stages=1)
buf5 = buf3
del buf3
extern_kernels.bmm(primals_3, reinterpret_tensor(buf4, (4, 4, 16),
(64, 16, 1), 0), out=buf5)
del buf4
return reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 4, 1, 16), 0
), reinterpret_tensor(primals_3, (4, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(primals_4, (4, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(primals_2, (1, 4, 16), (64, 1, 4), 0)
class TriaffineNew(nn.Module):
"""
Triaffine layer for second-order scoring.
This function has a tensor of weights :math:`W` and bias terms if needed.
The score :math:`s(x, y, z)` of the vector triple :math:`(x, y, z)` is computed as :math:`x^T z^T W y`.
Usually, :math:`x` and :math:`y` can be concatenated with bias terms.
References:
- Yu Zhang, Zhenghua Li and Min Zhang. 2020.
`Efficient Second-Order TreeCRF for Neural Dependency Parsing`_.
- Xinyu Wang, Jingxian Huang, and Kewei Tu. 2019.
`Second-Order Semantic Dependency Parsing with End-to-End Neural Networks`_.
Args:
n_in (int):
The size of the input feature.
bias_x (bool):
If ``True``, adds a bias term for tensor :math:`x`. Default: ``False``.
bias_y (bool):
If ``True``, adds a bias term for tensor :math:`y`. Default: ``False``.
.. _Efficient Second-Order TreeCRF for Neural Dependency Parsing:
https://www.aclweb.org/anthology/2020.acl-main.302/
.. _Second-Order Semantic Dependency Parsing with End-to-End Neural Networks:
https://www.aclweb.org/anthology/P19-1454/
"""
def __init__(self, n_in, bias_x=False, bias_y=False):
super().__init__()
self.n_in = n_in
self.bias_x = bias_x
self.bias_y = bias_y
self.weight = nn.Parameter(torch.Tensor(n_in + bias_x, n_in, n_in +
bias_y))
self.reset_parameters()
def __repr__(self):
s = f'n_in={self.n_in}'
if self.bias_x:
s += f', bias_x={self.bias_x}'
if self.bias_y:
s += f', bias_y={self.bias_y}'
return f'{self.__class__.__name__}({s})'
def reset_parameters(self):
nn.init.zeros_(self.weight)
def forward(self, input_0, input_1, input_2):
primals_1 = self.weight
primals_2 = input_0
primals_3 = input_1
primals_4 = input_2
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
KoichiYasuoka/SuPar
|
Triaffine
| false
| 11,631
|
[
"MIT"
] | 0
|
9bcf10fb946cae75b6a311d4cd19bec5bb1a9487
|
https://github.com/KoichiYasuoka/SuPar/tree/9bcf10fb946cae75b6a311d4cd19bec5bb1a9487
|
InvConvNear
|
import torch
from torch.nn import functional as F
from torch import nn
import torch.utils.data
import torch.optim
class InvConvNear(nn.Module):
def __init__(self, channels, n_split=4, no_jacobian=False, **kwargs):
super().__init__()
assert n_split % 2 == 0
self.channels = channels
self.n_split = n_split
self.no_jacobian = no_jacobian
w_init = torch.qr(torch.FloatTensor(self.n_split, self.n_split).
normal_())[0]
if torch.det(w_init) < 0:
w_init[:, 0] = -1 * w_init[:, 0]
self.weight = nn.Parameter(w_init)
def forward(self, x, x_mask=None, reverse=False, **kwargs):
b, c, t = x.size()
assert c % self.n_split == 0
if x_mask is None:
x_mask = 1
x_len = torch.ones((b,), dtype=x.dtype, device=x.device) * t
else:
x_len = torch.sum(x_mask, [1, 2])
x = x.view(b, 2, c // self.n_split, self.n_split // 2, t)
x = x.permute(0, 1, 3, 2, 4).contiguous().view(b, self.n_split, c //
self.n_split, t)
if reverse:
if hasattr(self, 'weight_inv'):
weight = self.weight_inv
else:
weight = torch.inverse(self.weight.float())
logdet = None
else:
weight = self.weight
if self.no_jacobian:
logdet = 0
else:
logdet = torch.logdet(self.weight) * (c / self.n_split) * x_len
weight = weight.view(self.n_split, self.n_split, 1, 1)
z = F.conv2d(x, weight)
z = z.view(b, 2, self.n_split // 2, c // self.n_split, t)
z = z.permute(0, 1, 3, 2, 4).contiguous().view(b, c, t) * x_mask
return z, logdet
def store_inverse(self):
self.weight_inv = torch.inverse(self.weight.float())
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
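# Hedged usage sketch (not part of the original record): round-trips a tensor
# through the invertible 1x1 convolution; shapes follow get_inputs() above and
# everything runs on CPU.
if __name__ == '__main__':
    flow = InvConvNear(channels=4)
    x, = get_inputs()  # [batch=4, channels=4, time=4]
    z, logdet = flow(x)
    flow.store_inverse()
    x_rec, _ = flow(z, reverse=True)
    # Applying the cached inverse weight undoes the channel mixing exactly, up
    # to float32 round-off.
    assert torch.allclose(x, x_rec, atol=1e-04)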
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.utils.data
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_eq_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = -1.0
tmp3 = tmp1 == tmp2
tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp3, None)
@triton.jit
def triton_poi_fused_mul_scalar_tensor_where_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0).to(tl.int1)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp4 = float('nan')
tmp5 = tl.where(tmp1, tmp4, tmp3)
tmp6 = 1.0
tmp7 = tmp5 * tmp6
tmp8 = 4.0
tmp9 = tmp7 * tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_mul_3(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (1, 4))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten._linalg_slogdet.default(primals_2)
buf1 = buf0[0]
buf2 = buf0[1]
buf3 = buf0[2]
buf4 = buf0[3]
del buf0
buf5 = empty_strided_cuda((), (), torch.bool)
get_raw_stream(0)
triton_poi_fused_eq_0[grid(1)](buf1, buf5, 1, XBLOCK=1, num_warps=1,
num_stages=1)
del buf1
buf6 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_mul_scalar_tensor_where_1[grid(4)](buf5, buf2,
buf6, 4, XBLOCK=4, num_warps=1, num_stages=1)
del buf2
buf7 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
triton_poi_fused_convolution_2[grid(4, 4)](primals_2, buf7, 4, 4,
XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1)
buf8 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4,
4, 1, 4), (16, 4, 4, 1), 0), buf7, stride=(1, 1), padding=(0, 0
), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 1, 4), (16, 4, 4, 1))
del buf7
buf9 = reinterpret_tensor(buf8, (4, 4, 4), (16, 4, 1), 0)
del buf8
triton_poi_fused_mul_3[grid(64)](buf9, 64, XBLOCK=64, num_warps=1,
num_stages=1)
return buf9, buf6, reinterpret_tensor(primals_1, (4, 4, 1, 4), (16, 4,
8, 1), 0), buf3, buf4, buf5, reinterpret_tensor(primals_2, (4, 4, 1,
1), (1, 4, 4, 4), 0)
class InvConvNearNew(nn.Module):
def __init__(self, channels, n_split=4, no_jacobian=False, **kwargs):
super().__init__()
assert n_split % 2 == 0
self.channels = channels
self.n_split = n_split
self.no_jacobian = no_jacobian
w_init = torch.qr(torch.FloatTensor(self.n_split, self.n_split).
normal_())[0]
if torch.det(w_init) < 0:
w_init[:, 0] = -1 * w_init[:, 0]
self.weight = nn.Parameter(w_init)
def store_inverse(self):
self.weight_inv = torch.inverse(self.weight.float())
def forward(self, input_0):
primals_2 = self.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0], output[1]
|
JINHXu/NeMo
|
InvConvNear
| false
| 11,632
|
[
"Apache-2.0"
] | 0
|
835db62e39919436824ce022fd3b3f6bac301cd6
|
https://github.com/JINHXu/NeMo/tree/835db62e39919436824ce022fd3b3f6bac301cd6
|
TVLoss
|
import torch
from torch import nn
import torch.utils.data
from torchvision.transforms import *
class TVLoss(nn.Module):
def __init__(self, tv_loss_weight=1):
super(TVLoss, self).__init__()
self.tv_loss_weight = tv_loss_weight
def forward(self, x):
batch_size = x.size()[0]
h_x = x.size()[2]
w_x = x.size()[3]
count_h = self.tensor_size(x[:, :, 1:, :])
count_w = self.tensor_size(x[:, :, :, 1:])
h_tv = torch.pow(x[:, :, 1:, :] - x[:, :, :h_x - 1, :], 2).sum()
w_tv = torch.pow(x[:, :, :, 1:] - x[:, :, :, :w_x - 1], 2).sum()
return self.tv_loss_weight * 2 * (h_tv / count_h + w_tv / count_w
) / batch_size
@staticmethod
def tensor_size(t):
return t.size()[1] * t.size()[2] * t.size()[3]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
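# Hedged usage sketch (not part of the original record): total variation here is
# the normalized sum of squared horizontal and vertical neighbour differences,
# so a spatially constant image scores exactly zero.
if __name__ == '__main__':
    x, = get_inputs()  # [4, 4, 4, 4]
    tv = TVLoss()
    assert tv(x).item() >= 0.0
    assert tv(torch.ones(2, 3, 8, 8)).item() == 0.0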
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.utils.data
from torchvision.transforms import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r0 = rindex % 12
r1 = rindex // 12
r2 = rindex % 3
r3 = rindex // 3
tmp0 = tl.load(in_ptr0 + (4 + r0 + 16 * r1), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (r0 + 16 * r1), rmask, other=0.0)
tmp8 = tl.load(in_ptr0 + (1 + r2 + 4 * r3), rmask, other=0.0)
tmp9 = tl.load(in_ptr0 + (r2 + 4 * r3), rmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp10 = tmp8 - tmp9
tmp11 = tmp10 * tmp10
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.where(rmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = 0.020833333333333332
tmp17 = tmp7 * tmp16
tmp18 = tmp15 * tmp16
tmp19 = tmp17 + tmp18
tmp20 = 2.0
tmp21 = tmp19 * tmp20
tmp22 = 0.25
tmp23 = tmp21 * tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp23, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mul_pow_sub_sum_0[grid(1)](buf2, arg0_1, 1,
192, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf2,
class TVLossNew(nn.Module):
def __init__(self, tv_loss_weight=1):
super(TVLossNew, self).__init__()
self.tv_loss_weight = tv_loss_weight
@staticmethod
def tensor_size(t):
return t.size()[1] * t.size()[2] * t.size()[3]
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
HamsterBiz/iSeeBetter
|
TVLoss
| false
| 11,633
|
[
"MIT"
] | 0
|
a71cee61583bdedab1f3b368e2cb7dc5ad969aed
|
https://github.com/HamsterBiz/iSeeBetter/tree/a71cee61583bdedab1f3b368e2cb7dc5ad969aed
|
SpaceToDepth
|
import torch
import torch.optim
import torch.nn as nn
import torch.utils.data
class SpaceToDepth(nn.Module):
def __init__(self, block_size):
super(SpaceToDepth, self).__init__()
self.block_size = block_size
self.block_size_sq = block_size * block_size
def forward(self, input):
output = input.permute(0, 2, 3, 1)
batch_size, s_height, s_width, s_depth = output.size()
d_depth = s_depth * self.block_size_sq
int(s_width / self.block_size)
d_height = int(s_height / self.block_size)
t_1 = output.split(self.block_size, 2)
stack = [t_t.reshape(batch_size, d_height, d_depth) for t_t in t_1]
output = torch.stack(stack, 1)
output = output.permute(0, 2, 1, 3)
output = output.permute(0, 3, 1, 2)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'block_size': 4}]
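# Hedged usage sketch (not part of the original record): folds each 4x4 spatial
# block into the channel dimension, matching the (4, 64, 1, 1) output asserted
# by the compiled version below.
if __name__ == '__main__':
    x, = get_inputs()  # [N=4, C=4, H=4, W=4]
    out = SpaceToDepth(block_size=4)(x)
    # For this input: (N, C, H, W) -> (N, C * block_size**2, H // block_size,
    # W // block_size).
    assert out.shape == (4, 64, 1, 1)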
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.optim
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64, 4)](arg0_1, buf0, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 64, 1, 1), (64, 1, 64, 64), 0),
class SpaceToDepthNew(nn.Module):
def __init__(self, block_size):
super(SpaceToDepthNew, self).__init__()
self.block_size = block_size
self.block_size_sq = block_size * block_size
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
LeikvollE/pytorch-superpoint
|
SpaceToDepth
| false
| 11,634
|
[
"MIT"
] | 0
|
52144a760e0cc46259e57397a5a55f0585fe6d0b
|
https://github.com/LeikvollE/pytorch-superpoint/tree/52144a760e0cc46259e57397a5a55f0585fe6d0b
|
GEGLU
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class GEGLU(nn.Module):
def __init__(self, dim_in, dim_out):
super().__init__()
self.proj = nn.Linear(dim_in, dim_out * 2)
def forward(self, x):
x, gate = self.proj(x).chunk(2, dim=-1)
return x * F.gelu(gate)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim_in': 4, 'dim_out': 4}]
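# Hedged usage sketch (not part of the original record): the projection doubles
# the last dimension, then one half gates the other through GELU, so the output
# keeps the dim_out width.
if __name__ == '__main__':
    x, = get_inputs()  # [4, 4, 4, 4]
    geglu = GEGLU(dim_in=4, dim_out=4)
    out = geglu(x)
    assert out.shape == (4, 4, 4, 4)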
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_gelu_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = 0.7071067811865476
tmp5 = tmp1 * tmp4
tmp6 = libdevice.erf(tmp5)
tmp7 = 1.0
tmp8 = tmp6 + tmp7
tmp9 = tmp3 * tmp8
tmp10 = tmp0 * tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_gelu_mul_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf0, (4, 4, 4, 4), (128, 32, 8, 1), 0
), reinterpret_tensor(buf0, (4, 4, 4, 4), (128, 32, 8, 1), 4)
class GEGLUNew(nn.Module):
def __init__(self, dim_in, dim_out):
super().__init__()
self.proj = nn.Linear(dim_in, dim_out * 2)
def forward(self, input_0):
primals_1 = self.proj.weight
primals_2 = self.proj.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Lawliet19189/squad-1
|
GEGLU
| false
| 11,635
|
[
"MIT"
] | 0
|
75531054d74e20838d8acff81749f335973b9ae3
|
https://github.com/Lawliet19189/squad-1/tree/75531054d74e20838d8acff81749f335973b9ae3
|
ScaleNorm
|
import torch
import torch.nn as nn
class ScaleNorm(nn.Module):
def __init__(self, dim, eps=1e-05):
super().__init__()
self.eps = eps
self.g = nn.Parameter(torch.ones(1))
def forward(self, x):
n = torch.norm(x, dim=-1, keepdim=True).clamp(min=self.eps)
return x / n * self.g
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
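# Hedged usage sketch (not part of the original record): each last-dim vector is
# rescaled to (approximately) unit L2 norm and multiplied by the learned scalar
# g, which starts at 1.
if __name__ == '__main__':
    x, = get_inputs()  # [4, 4, 4, 4]
    out = ScaleNorm(dim=4)(x)
    # Assumes the input norms exceed eps, which holds for torch.rand inputs.
    assert torch.allclose(out.norm(dim=-1), torch.ones(4, 4, 4), atol=1e-05)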
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_div_linalg_vector_norm_mul_0(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + 0)
tmp17 = tl.broadcast_to(tmp16, [XBLOCK])
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-05
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tmp18 = tmp15 * tmp17
tl.store(out_ptr0 + x2, tmp18, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_div_linalg_vector_norm_mul_0[grid(256)](
primals_1, primals_2, buf0, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
return buf0, primals_1
class ScaleNormNew(nn.Module):
def __init__(self, dim, eps=1e-05):
super().__init__()
self.eps = eps
self.g = nn.Parameter(torch.ones(1))
def forward(self, input_0):
primals_2 = self.g
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
|
Lawliet19189/squad-1
|
ScaleNorm
| false
| 11,636
|
[
"MIT"
] | 0
|
75531054d74e20838d8acff81749f335973b9ae3
|
https://github.com/Lawliet19189/squad-1/tree/75531054d74e20838d8acff81749f335973b9ae3
|
MultiHeadAttention
|
import math
import torch
from torch import nn
import torch.utils.data
import torch.optim
class MultiHeadAttention(nn.Module):
"""
Multi-head scaled dot-product attention layer.
Args:
hidden_size: size of the embeddings in the model, also known as d_model
num_attention_heads: number of heads in multi-head attention
attn_score_dropout: probability of dropout applied to attention scores
attn_layer_dropout: probability of dropout applied to the output of the
whole layer, but before layer normalization
"""
def __init__(self, hidden_size, num_attention_heads, attn_score_dropout
=0.0, attn_layer_dropout=0.0):
super().__init__()
if hidden_size % num_attention_heads != 0:
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (hidden_size, num_attention_heads))
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.attn_head_size = int(hidden_size / num_attention_heads)
self.attn_scale = math.sqrt(math.sqrt(self.attn_head_size))
self.query_net = nn.Linear(hidden_size, hidden_size)
self.key_net = nn.Linear(hidden_size, hidden_size)
self.value_net = nn.Linear(hidden_size, hidden_size)
self.out_projection = nn.Linear(hidden_size, hidden_size)
self.attn_dropout = nn.Dropout(attn_score_dropout)
self.layer_dropout = nn.Dropout(attn_layer_dropout)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attn_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, queries, keys, values, attention_mask):
query = self.query_net(queries)
key = self.key_net(keys)
value = self.value_net(values)
query = self.transpose_for_scores(query) / self.attn_scale
key = self.transpose_for_scores(key) / self.attn_scale
value = self.transpose_for_scores(value)
attention_scores = torch.matmul(query, key.transpose(-1, -2))
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
attention_probs = torch.softmax(attention_scores, dim=-1)
attention_probs = self.attn_dropout(attention_probs)
context = torch.matmul(attention_probs, value)
context = context.permute(0, 2, 1, 3).contiguous()
new_context_shape = context.size()[:-2] + (self.hidden_size,)
context = context.view(*new_context_shape)
output_states = self.out_projection(context)
output_states = self.layer_dropout(output_states)
return output_states
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4,
4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4, 'num_attention_heads': 4}]
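# Hedged usage sketch (not part of the original record): the fourth tensor from
# get_inputs() is used as an additive attention mask and is broadcast onto the
# [batch, heads, seq, seq] score tensor; both dropouts default to 0.
if __name__ == '__main__':
    q, k, v, mask = get_inputs()  # each [batch=4, seq_len=4, hidden=4]
    attn = MultiHeadAttention(hidden_size=4, num_attention_heads=4)
    out = attn(q, k, v, mask)
    assert out.shape == (4, 4, 4)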
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
from torch import nn
import torch.utils.data
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_div_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_add_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp6, tmp9)
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp5 - tmp14
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp9 - tmp14
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tmp13 - tmp14
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tl.store(out_ptr0 + x2, tmp14, xmask)
tl.store(out_ptr1 + x2, tmp25, xmask)
@triton.jit
def triton_poi_fused__softmax_add_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 64
x5 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp7 = tmp5 / tmp6
tl.store(in_out_ptr0 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_div_0[grid(16, 4)](buf0, primals_2, buf3, 16,
4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_clone_div_0[grid(16, 4)](buf1, primals_5, buf4, 16,
4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf1
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_add_1[grid(64)](buf5, primals_10, buf6,
buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf8 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_add_2[grid(256)](buf8, primals_10, buf6,
buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_10
buf9 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf7
triton_poi_fused_clone_3[grid(16, 4)](buf2, primals_8, buf9, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del primals_8
buf10 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10)
buf11 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf6
triton_poi_fused_clone_4[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0)
del buf10
extern_kernels.addmm(primals_12, reinterpret_tensor(buf11, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf12)
del primals_12
return reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0
), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0
), primals_11, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)
class MultiHeadAttentionNew(nn.Module):
"""
Multi-head scaled dot-product attention layer.
Args:
hidden_size: size of the embeddings in the model, also known as d_model
num_attention_heads: number of heads in multi-head attention
attn_score_dropout: probability of dropout applied to attention scores
attn_layer_dropout: probability of dropout applied to the output of the
whole layer, but before layer normalization
"""
    def __init__(self, hidden_size, num_attention_heads,
        attn_score_dropout=0.0, attn_layer_dropout=0.0):
super().__init__()
if hidden_size % num_attention_heads != 0:
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (hidden_size, num_attention_heads))
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.attn_head_size = int(hidden_size / num_attention_heads)
self.attn_scale = math.sqrt(math.sqrt(self.attn_head_size))
self.query_net = nn.Linear(hidden_size, hidden_size)
self.key_net = nn.Linear(hidden_size, hidden_size)
self.value_net = nn.Linear(hidden_size, hidden_size)
self.out_projection = nn.Linear(hidden_size, hidden_size)
self.attn_dropout = nn.Dropout(attn_score_dropout)
self.layer_dropout = nn.Dropout(attn_layer_dropout)
def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads,
            self.attn_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, input_0, input_1, input_2, input_3):
primals_1 = self.query_net.weight
primals_2 = self.query_net.bias
primals_4 = self.key_net.weight
primals_5 = self.key_net.bias
primals_7 = self.value_net.weight
primals_8 = self.value_net.bias
primals_11 = self.out_projection.weight
primals_12 = self.out_projection.bias
primals_3 = input_0
primals_6 = input_1
primals_9 = input_2
primals_10 = input_3
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
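# Minimal usage sketch for the compiled wrapper above -- an illustrative
# addition, not part of the original source. The shape asserts in `call`
# suggest the graph was traced for CUDA inputs of shape (4, 4, 4) with
# hidden_size=4 and num_attention_heads=4 (head size 1); the fourth input is
# the additive attention bias applied to the scores before softmax.
def _mha_smoke_test():
    if not torch.cuda.is_available():
        return
    mha = MultiHeadAttentionNew(hidden_size=4, num_attention_heads=4).cuda()
    q = torch.rand(4, 4, 4, device='cuda')
    k = torch.rand(4, 4, 4, device='cuda')
    v = torch.rand(4, 4, 4, device='cuda')
    bias = torch.zeros(4, 4, 4, device='cuda')
    out = mha(q, k, v, bias)
    assert out.shape == (4, 4, 4)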
|
JINHXu/NeMo
|
MultiHeadAttention
| false
| 11,637
|
[
"Apache-2.0"
] | 0
|
835db62e39919436824ce022fd3b3f6bac301cd6
|
https://github.com/JINHXu/NeMo/tree/835db62e39919436824ce022fd3b3f6bac301cd6
|
Inception3
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicConv2d(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, bias=True, **kwargs)
def forward(self, x):
x = self.conv(x)
return F.relu(x, inplace=True)
class InceptionA(nn.Module):
def __init__(self, in_channels, pool_features):
super(InceptionA, self).__init__()
self.branch1x1 = BasicConv2d(in_channels, 64, kernel_size=1)
self.branch5x5_1 = BasicConv2d(in_channels, 48, kernel_size=1)
self.branch5x5_2 = BasicConv2d(48, 64, kernel_size=5, padding=2)
self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, padding=1)
self.branch_pool = BasicConv2d(in_channels, pool_features,
kernel_size=1)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
class InceptionB(nn.Module):
def __init__(self, in_channels):
super(InceptionB, self).__init__()
self.branch3x3 = BasicConv2d(in_channels, 384, kernel_size=3, stride=2)
self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, stride=2)
def forward(self, x):
branch3x3 = self.branch3x3(x)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
outputs = [branch3x3, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
class InceptionC(nn.Module):
def __init__(self, in_channels, channels_7x7):
super(InceptionC, self).__init__()
self.branch1x1 = BasicConv2d(in_channels, 192, kernel_size=1)
c7 = channels_7x7
self.branch7x7_1 = BasicConv2d(in_channels, c7, kernel_size=1)
self.branch7x7_2 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=
(0, 3))
self.branch7x7_3 = BasicConv2d(c7, 192, kernel_size=(7, 1), padding
=(3, 0))
self.branch7x7dbl_1 = BasicConv2d(in_channels, c7, kernel_size=1)
self.branch7x7dbl_2 = BasicConv2d(c7, c7, kernel_size=(7, 1),
padding=(3, 0))
self.branch7x7dbl_3 = BasicConv2d(c7, c7, kernel_size=(1, 7),
padding=(0, 3))
self.branch7x7dbl_4 = BasicConv2d(c7, c7, kernel_size=(7, 1),
padding=(3, 0))
self.branch7x7dbl_5 = BasicConv2d(c7, 192, kernel_size=(1, 7),
padding=(0, 3))
self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch7x7 = self.branch7x7_1(x)
branch7x7 = self.branch7x7_2(branch7x7)
branch7x7 = self.branch7x7_3(branch7x7)
branch7x7dbl = self.branch7x7dbl_1(x)
branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
return torch.cat(outputs, 1)
class InceptionD(nn.Module):
def __init__(self, in_channels):
super(InceptionD, self).__init__()
self.branch3x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
self.branch3x3_2 = BasicConv2d(192, 320, kernel_size=3, stride=2)
self.branch7x7x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
self.branch7x7x3_2 = BasicConv2d(192, 192, kernel_size=(1, 7),
padding=(0, 3))
self.branch7x7x3_3 = BasicConv2d(192, 192, kernel_size=(7, 1),
padding=(3, 0))
self.branch7x7x3_4 = BasicConv2d(192, 192, kernel_size=3, stride=2)
def forward(self, x):
branch3x3 = self.branch3x3_1(x)
branch3x3 = self.branch3x3_2(branch3x3)
branch7x7x3 = self.branch7x7x3_1(x)
branch7x7x3 = self.branch7x7x3_2(branch7x7x3)
branch7x7x3 = self.branch7x7x3_3(branch7x7x3)
branch7x7x3 = self.branch7x7x3_4(branch7x7x3)
branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
outputs = [branch3x3, branch7x7x3, branch_pool]
return torch.cat(outputs, 1)
class InceptionE(nn.Module):
def __init__(self, in_channels):
super(InceptionE, self).__init__()
self.branch1x1 = BasicConv2d(in_channels, 320, kernel_size=1)
self.branch3x3_1 = BasicConv2d(in_channels, 384, kernel_size=1)
self.branch3x3_2a = BasicConv2d(384, 384, kernel_size=(1, 3),
padding=(0, 1))
self.branch3x3_2b = BasicConv2d(384, 384, kernel_size=(3, 1),
padding=(1, 0))
self.branch3x3dbl_1 = BasicConv2d(in_channels, 448, kernel_size=1)
self.branch3x3dbl_2 = BasicConv2d(448, 384, kernel_size=3, padding=1)
self.branch3x3dbl_3a = BasicConv2d(384, 384, kernel_size=(1, 3),
padding=(0, 1))
self.branch3x3dbl_3b = BasicConv2d(384, 384, kernel_size=(3, 1),
padding=(1, 0))
self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch3x3 = self.branch3x3_1(x)
branch3x3 = [self.branch3x3_2a(branch3x3), self.branch3x3_2b(branch3x3)
]
branch3x3 = torch.cat(branch3x3, 1)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = [self.branch3x3dbl_3a(branch3x3dbl), self.
branch3x3dbl_3b(branch3x3dbl)]
branch3x3dbl = torch.cat(branch3x3dbl, 1)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
class Inception3(nn.Module):
def __init__(self, num_classes=1000, transform_input=True):
super(Inception3, self).__init__()
self.transform_input = transform_input
self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
self.Mixed_5b = InceptionA(192, pool_features=32)
self.Mixed_5c = InceptionA(256, pool_features=64)
self.Mixed_5d = InceptionA(288, pool_features=64)
self.Mixed_6a = InceptionB(288)
self.Mixed_6b = InceptionC(768, channels_7x7=128)
self.Mixed_6c = InceptionC(768, channels_7x7=160)
self.Mixed_6d = InceptionC(768, channels_7x7=160)
self.Mixed_6e = InceptionC(768, channels_7x7=192)
self.Mixed_7a = InceptionD(768)
self.Mixed_7b = InceptionE(1280)
self.Mixed_7c = InceptionE(2048)
self.fc = nn.Linear(2048, num_classes)
def set_params(self, dict):
self.Conv2d_1a_3x3.conv.weight = nn.Parameter(torch.FloatTensor(
dict['layer1']['c_1_w']))
self.Conv2d_1a_3x3.conv.bias = nn.Parameter(torch.FloatTensor(dict[
'layer1']['c_1_b']))
self.Conv2d_2a_3x3.conv.weight = nn.Parameter(torch.FloatTensor(
dict['layer1']['c_2_w']))
self.Conv2d_2a_3x3.conv.bias = nn.Parameter(torch.FloatTensor(dict[
'layer1']['c_2_b']))
self.Conv2d_2b_3x3.conv.weight = nn.Parameter(torch.FloatTensor(
dict['layer1']['c_3_w']))
self.Conv2d_2b_3x3.conv.bias = nn.Parameter(torch.FloatTensor(dict[
'layer1']['c_3_b']))
self.Conv2d_3b_1x1.conv.weight = nn.Parameter(torch.FloatTensor(
dict['layer1']['c_4_w']))
self.Conv2d_3b_1x1.conv.bias = nn.Parameter(torch.FloatTensor(dict[
'layer1']['c_4_b']))
self.Conv2d_4a_3x3.conv.weight = nn.Parameter(torch.FloatTensor(
dict['layer1']['c_5_w']))
self.Conv2d_4a_3x3.conv.bias = nn.Parameter(torch.FloatTensor(dict[
'layer1']['c_5_b']))
self.Mixed_5b.branch1x1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_1']['way1_w']))
self.Mixed_5b.branch1x1.conv.bias = nn.Parameter(torch.FloatTensor(
dict['layer2_1']['way1_b']))
self.Mixed_5b.branch5x5_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_1']['way2_1_w']))
self.Mixed_5b.branch5x5_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_1']['way2_1_b']))
self.Mixed_5b.branch5x5_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_1']['way2_2_w']))
self.Mixed_5b.branch5x5_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_1']['way2_2_b']))
self.Mixed_5b.branch3x3dbl_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_1']['way3_1_w']))
self.Mixed_5b.branch3x3dbl_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_1']['way3_1_b']))
self.Mixed_5b.branch3x3dbl_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_1']['way3_2_w']))
self.Mixed_5b.branch3x3dbl_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_1']['way3_2_b']))
self.Mixed_5b.branch3x3dbl_3.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_1']['way3_3_w']))
self.Mixed_5b.branch3x3dbl_3.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_1']['way3_3_b']))
self.Mixed_5b.branch_pool.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_1']['way4_w']))
self.Mixed_5b.branch_pool.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_1']['way4_b']))
self.Mixed_5c.branch1x1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_2']['way1_w']))
self.Mixed_5c.branch1x1.conv.bias = nn.Parameter(torch.FloatTensor(
dict['layer2_2']['way1_b']))
self.Mixed_5c.branch5x5_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_2']['way2_1_w']))
self.Mixed_5c.branch5x5_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_2']['way2_1_b']))
self.Mixed_5c.branch5x5_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_2']['way2_2_w']))
self.Mixed_5c.branch5x5_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_2']['way2_2_b']))
self.Mixed_5c.branch3x3dbl_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_2']['way3_1_w']))
self.Mixed_5c.branch3x3dbl_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_2']['way3_1_b']))
self.Mixed_5c.branch3x3dbl_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_2']['way3_2_w']))
self.Mixed_5c.branch3x3dbl_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_2']['way3_2_b']))
self.Mixed_5c.branch3x3dbl_3.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_2']['way3_3_w']))
self.Mixed_5c.branch3x3dbl_3.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_2']['way3_3_b']))
self.Mixed_5c.branch_pool.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_2']['way4_w']))
self.Mixed_5c.branch_pool.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_2']['way4_b']))
self.Mixed_5d.branch1x1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_3']['way1_w']))
self.Mixed_5d.branch1x1.conv.bias = nn.Parameter(torch.FloatTensor(
dict['layer2_3']['way1_b']))
self.Mixed_5d.branch5x5_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_3']['way2_1_w']))
self.Mixed_5d.branch5x5_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_3']['way2_1_b']))
self.Mixed_5d.branch5x5_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_3']['way2_2_w']))
self.Mixed_5d.branch5x5_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_3']['way2_2_b']))
self.Mixed_5d.branch3x3dbl_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_3']['way3_1_w']))
self.Mixed_5d.branch3x3dbl_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_3']['way3_1_b']))
self.Mixed_5d.branch3x3dbl_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_3']['way3_2_w']))
self.Mixed_5d.branch3x3dbl_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_3']['way3_2_b']))
self.Mixed_5d.branch3x3dbl_3.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_3']['way3_3_w']))
self.Mixed_5d.branch3x3dbl_3.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_3']['way3_3_b']))
self.Mixed_5d.branch_pool.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_3']['way4_w']))
self.Mixed_5d.branch_pool.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_3']['way4_b']))
self.Mixed_6a.branch3x3.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer3']['way1_w']))
self.Mixed_6a.branch3x3.conv.bias = nn.Parameter(torch.FloatTensor(
dict['layer3']['way1_b']))
self.Mixed_6a.branch3x3dbl_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer3']['way2_1_w']))
self.Mixed_6a.branch3x3dbl_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer3']['way2_1_b']))
self.Mixed_6a.branch3x3dbl_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer3']['way2_2_w']))
self.Mixed_6a.branch3x3dbl_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer3']['way2_2_b']))
self.Mixed_6a.branch3x3dbl_3.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer3']['way2_3_w']))
self.Mixed_6a.branch3x3dbl_3.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer3']['way2_3_b']))
self.Mixed_6b.branch1x1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way1_w']))
self.Mixed_6b.branch1x1.conv.bias = nn.Parameter(torch.FloatTensor(
dict['layer4_1']['way1_b']))
self.Mixed_6b.branch7x7_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way2_1_w']))
self.Mixed_6b.branch7x7_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way2_1_b']))
self.Mixed_6b.branch7x7_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way2_2_w']))
self.Mixed_6b.branch7x7_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way2_2_b']))
self.Mixed_6b.branch7x7_3.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way2_3_w']))
self.Mixed_6b.branch7x7_3.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way2_3_b']))
self.Mixed_6b.branch7x7dbl_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way3_1_w']))
self.Mixed_6b.branch7x7dbl_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way3_1_b']))
self.Mixed_6b.branch7x7dbl_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way3_2_w']))
self.Mixed_6b.branch7x7dbl_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way3_2_b']))
self.Mixed_6b.branch7x7dbl_3.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way3_3_w']))
self.Mixed_6b.branch7x7dbl_3.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way3_3_b']))
self.Mixed_6b.branch7x7dbl_4.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way3_4_w']))
self.Mixed_6b.branch7x7dbl_4.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way3_4_b']))
self.Mixed_6b.branch7x7dbl_5.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way3_5_w']))
self.Mixed_6b.branch7x7dbl_5.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way3_5_b']))
self.Mixed_6b.branch_pool.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way4_w']))
self.Mixed_6b.branch_pool.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way4_b']))
self.Mixed_6c.branch1x1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way1_w']))
self.Mixed_6c.branch1x1.conv.bias = nn.Parameter(torch.FloatTensor(
dict['layer4_2']['way1_b']))
self.Mixed_6c.branch7x7_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way2_1_w']))
self.Mixed_6c.branch7x7_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way2_1_b']))
self.Mixed_6c.branch7x7_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way2_2_w']))
self.Mixed_6c.branch7x7_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way2_2_b']))
self.Mixed_6c.branch7x7_3.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way2_3_w']))
self.Mixed_6c.branch7x7_3.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way2_3_b']))
self.Mixed_6c.branch7x7dbl_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way3_1_w']))
self.Mixed_6c.branch7x7dbl_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way3_1_b']))
self.Mixed_6c.branch7x7dbl_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way3_2_w']))
self.Mixed_6c.branch7x7dbl_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way3_2_b']))
self.Mixed_6c.branch7x7dbl_3.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way3_3_w']))
self.Mixed_6c.branch7x7dbl_3.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way3_3_b']))
self.Mixed_6c.branch7x7dbl_4.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way3_4_w']))
self.Mixed_6c.branch7x7dbl_4.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way3_4_b']))
self.Mixed_6c.branch7x7dbl_5.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way3_5_w']))
self.Mixed_6c.branch7x7dbl_5.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way3_5_b']))
self.Mixed_6c.branch_pool.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way4_w']))
self.Mixed_6c.branch_pool.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way4_b']))
self.Mixed_6d.branch1x1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way1_w']))
self.Mixed_6d.branch1x1.conv.bias = nn.Parameter(torch.FloatTensor(
dict['layer4_3']['way1_b']))
self.Mixed_6d.branch7x7_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way2_1_w']))
self.Mixed_6d.branch7x7_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way2_1_b']))
self.Mixed_6d.branch7x7_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way2_2_w']))
self.Mixed_6d.branch7x7_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way2_2_b']))
self.Mixed_6d.branch7x7_3.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way2_3_w']))
self.Mixed_6d.branch7x7_3.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way2_3_b']))
self.Mixed_6d.branch7x7dbl_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way3_1_w']))
self.Mixed_6d.branch7x7dbl_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way3_1_b']))
self.Mixed_6d.branch7x7dbl_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way3_2_w']))
self.Mixed_6d.branch7x7dbl_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way3_2_b']))
self.Mixed_6d.branch7x7dbl_3.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way3_3_w']))
self.Mixed_6d.branch7x7dbl_3.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way3_3_b']))
self.Mixed_6d.branch7x7dbl_4.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way3_4_w']))
self.Mixed_6d.branch7x7dbl_4.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way3_4_b']))
self.Mixed_6d.branch7x7dbl_5.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way3_5_w']))
self.Mixed_6d.branch7x7dbl_5.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way3_5_b']))
self.Mixed_6d.branch_pool.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way4_w']))
self.Mixed_6d.branch_pool.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way4_b']))
self.Mixed_6e.branch1x1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way1_w']))
self.Mixed_6e.branch1x1.conv.bias = nn.Parameter(torch.FloatTensor(
dict['layer4_4']['way1_b']))
self.Mixed_6e.branch7x7_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way2_1_w']))
self.Mixed_6e.branch7x7_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way2_1_b']))
self.Mixed_6e.branch7x7_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way2_2_w']))
self.Mixed_6e.branch7x7_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way2_2_b']))
self.Mixed_6e.branch7x7_3.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way2_3_w']))
self.Mixed_6e.branch7x7_3.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way2_3_b']))
self.Mixed_6e.branch7x7dbl_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way3_1_w']))
self.Mixed_6e.branch7x7dbl_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way3_1_b']))
self.Mixed_6e.branch7x7dbl_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way3_2_w']))
self.Mixed_6e.branch7x7dbl_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way3_2_b']))
self.Mixed_6e.branch7x7dbl_3.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way3_3_w']))
self.Mixed_6e.branch7x7dbl_3.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way3_3_b']))
self.Mixed_6e.branch7x7dbl_4.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way3_4_w']))
self.Mixed_6e.branch7x7dbl_4.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way3_4_b']))
self.Mixed_6e.branch7x7dbl_5.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way3_5_w']))
self.Mixed_6e.branch7x7dbl_5.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way3_5_b']))
self.Mixed_6e.branch_pool.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way4_w']))
self.Mixed_6e.branch_pool.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way4_b']))
self.Mixed_7a.branch3x3_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer5']['way1_1_w']))
self.Mixed_7a.branch3x3_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer5']['way1_1_b']))
self.Mixed_7a.branch3x3_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer5']['way1_2_w']))
self.Mixed_7a.branch3x3_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer5']['way1_2_b']))
self.Mixed_7a.branch7x7x3_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer5']['way2_1_w']))
self.Mixed_7a.branch7x7x3_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer5']['way2_1_b']))
self.Mixed_7a.branch7x7x3_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer5']['way2_2_w']))
self.Mixed_7a.branch7x7x3_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer5']['way2_2_b']))
self.Mixed_7a.branch7x7x3_3.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer5']['way2_3_w']))
self.Mixed_7a.branch7x7x3_3.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer5']['way2_3_b']))
self.Mixed_7a.branch7x7x3_4.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer5']['way2_4_w']))
self.Mixed_7a.branch7x7x3_4.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer5']['way2_4_b']))
self.Mixed_7b.branch1x1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_1']['way1_w']))
self.Mixed_7b.branch1x1.conv.bias = nn.Parameter(torch.FloatTensor(
dict['layer6_1']['way1_b']))
self.Mixed_7b.branch3x3_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_1']['way23_1_w']))
self.Mixed_7b.branch3x3_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer6_1']['way23_1_b']))
self.Mixed_7b.branch3x3_2a.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_1']['way2_2_w']))
self.Mixed_7b.branch3x3_2a.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer6_1']['way2_2_b']))
self.Mixed_7b.branch3x3_2b.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_1']['way3_2_w']))
self.Mixed_7b.branch3x3_2b.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer6_1']['way3_2_b']))
self.Mixed_7b.branch3x3dbl_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_1']['way45_1_w']))
self.Mixed_7b.branch3x3dbl_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer6_1']['way45_1_b']))
self.Mixed_7b.branch3x3dbl_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_1']['way45_2_w']))
self.Mixed_7b.branch3x3dbl_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer6_1']['way45_2_b']))
self.Mixed_7b.branch3x3dbl_3a.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_1']['way4_3_w']))
self.Mixed_7b.branch3x3dbl_3a.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer6_1']['way4_3_b']))
self.Mixed_7b.branch3x3dbl_3b.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_1']['way5_3_w']))
self.Mixed_7b.branch3x3dbl_3b.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer6_1']['way5_3_b']))
self.Mixed_7b.branch_pool.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_1']['way6_w']))
self.Mixed_7b.branch_pool.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer6_1']['way6_b']))
self.Mixed_7c.branch1x1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_2']['way1_w']))
self.Mixed_7c.branch1x1.conv.bias = nn.Parameter(torch.FloatTensor(
dict['layer6_2']['way1_b']))
self.Mixed_7c.branch3x3_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_2']['way23_1_w']))
self.Mixed_7c.branch3x3_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer6_2']['way23_1_b']))
self.Mixed_7c.branch3x3_2a.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_2']['way2_2_w']))
self.Mixed_7c.branch3x3_2a.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer6_2']['way2_2_b']))
self.Mixed_7c.branch3x3_2b.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_2']['way3_2_w']))
self.Mixed_7c.branch3x3_2b.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer6_2']['way3_2_b']))
self.Mixed_7c.branch3x3dbl_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_2']['way45_1_w']))
self.Mixed_7c.branch3x3dbl_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer6_2']['way45_1_b']))
self.Mixed_7c.branch3x3dbl_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_2']['way45_2_w']))
self.Mixed_7c.branch3x3dbl_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer6_2']['way45_2_b']))
self.Mixed_7c.branch3x3dbl_3a.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_2']['way4_3_w']))
self.Mixed_7c.branch3x3dbl_3a.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer6_2']['way4_3_b']))
self.Mixed_7c.branch3x3dbl_3b.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_2']['way5_3_w']))
self.Mixed_7c.branch3x3dbl_3b.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer6_2']['way5_3_b']))
self.Mixed_7c.branch_pool.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_2']['way6_w']))
self.Mixed_7c.branch_pool.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer6_2']['way6_b']))
self.fc.weight = nn.Parameter(torch.FloatTensor(dict['outputlayer']
['fc_w']))
self.fc.bias = nn.Parameter(torch.FloatTensor(dict['outputlayer'][
'fc_b']))
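    # Note on set_params above: it expects a nested dict keyed by block name
    # ('layer1', 'layer2_1' ... 'layer2_3', 'layer3', 'layer4_1' ... 'layer4_4',
    # 'layer5', 'layer6_1', 'layer6_2', 'outputlayer'); each inner dict maps the
    # short names used above (e.g. 'c_1_w'/'c_1_b', 'way2_1_w'/'way2_1_b',
    # 'fc_w'/'fc_b') to arrays matching the corresponding weight and bias
    # shapes, e.g. dict['layer1']['c_1_w'] must have the (32, 3, 3, 3) shape of
    # Conv2d_1a_3x3's weight.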
def forward(self, x):
if self.transform_input:
x = x.clone()
x[:, 0, :, :] = x[:, 0, :, :] * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
x[:, 1, :, :] = x[:, 1, :, :] * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
x[:, 2, :, :] = x[:, 2, :, :] * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
x = self.Conv2d_1a_3x3(x)
x = self.Conv2d_2a_3x3(x)
x = self.Conv2d_2b_3x3(x)
x = F.max_pool2d(x, kernel_size=3, stride=2)
x = self.Conv2d_3b_1x1(x)
x = self.Conv2d_4a_3x3(x)
x = F.max_pool2d(x, kernel_size=3, stride=2)
x = self.Mixed_5b(x)
x = self.Mixed_5c(x)
x = self.Mixed_5d(x)
x = self.Mixed_6a(x)
x = self.Mixed_6b(x)
x = self.Mixed_6c(x)
x = self.Mixed_6d(x)
x = self.Mixed_6e(x)
x = self.Mixed_7a(x)
x = self.Mixed_7b(x)
x = self.Mixed_7c(x)
x = F.avg_pool2d(x, kernel_size=8)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
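# Note on transform_input above: each per-channel map x * (std/0.5) +
# (mean - 0.5)/0.5 with (mean, std) of (0.485, 0.229), (0.456, 0.224),
# (0.406, 0.225) converts an input normalized with the ImageNet statistics,
# (raw - mean)/std, into the equivalent (raw - 0.5)/0.5 normalization. A small
# self-contained check of that identity (an illustrative addition, not part of
# the original source):
def _check_transform_input():
    raw = torch.rand(2, 3, 8, 8)
    mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)
    std = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)
    x = (raw - mean) / std
    x[:, 0] = x[:, 0] * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
    x[:, 1] = x[:, 1] * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
    x[:, 2] = x[:, 2] * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
    assert torch.allclose(x, (raw - 0.5) / 0.5, atol=1e-5)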
def get_inputs():
return [torch.rand([4, 3, 512, 512])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 96
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 80
y1 = yindex // 80
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 80 * x2 + 720 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 48
y1 = yindex // 48
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 48 * x2 + 1200 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 96
y1 = yindex // 96
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 96 * x2 + 864 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 288
y1 = yindex // 288
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 288 * x2 + 2592 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 7
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 7 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 896 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_9(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 7
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 7 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 896 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_10(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 7
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 160
y1 = yindex // 160
tmp0 = tl.load(in_ptr0 + (x2 + 7 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 160 * x2 + 1120 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_11(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 7
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 160
y1 = yindex // 160
tmp0 = tl.load(in_ptr0 + (x2 + 7 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 160 * x2 + 1120 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_12(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 7
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 192
y1 = yindex // 192
tmp0 = tl.load(in_ptr0 + (x2 + 7 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 192 * x2 + 1344 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_13(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 192
y1 = yindex // 192
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 192 * x2 + 1728 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_14(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 192
y1 = yindex // 192
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 192 * x2 + 1728 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_15(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 3
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 384
y1 = yindex // 384
tmp0 = tl.load(in_ptr0 + (x2 + 3 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 384 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_16(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 448
y1 = yindex // 448
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 448 * x2 + 4032 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_add_copy_mul_17(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
y0 = yindex % 3
x2 = xindex
y1 = yindex // 3
y3 = yindex
tmp7 = tl.load(in_ptr0 + (x2 + 786432 * y1), ymask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr0 + (262144 + x2 + 786432 * y1), ymask,
eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (524288 + x2 + 786432 * y1), ymask,
eviction_policy='evict_last')
tmp28 = tl.load(in_ptr0 + (x2 + 262144 * y3), ymask, eviction_policy=
'evict_last')
tmp0 = y0
tmp1 = tl.full([1, 1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tl.full([1, 1], 1, tl.int32)
tmp4 = tmp1 == tmp3
tmp5 = tl.full([1, 1], 0, tl.int32)
tmp6 = tmp3 == tmp5
tmp8 = 0.458
tmp9 = tmp7 * tmp8
tmp10 = -0.030000000000000027
tmp11 = tmp9 + tmp10
tmp13 = tl.where(tmp6, tmp11, tmp12)
tmp14 = 0.448
tmp15 = tmp13 * tmp14
tmp16 = -0.08799999999999997
tmp17 = tmp15 + tmp16
tmp18 = tmp1 == tmp5
tmp20 = tl.where(tmp18, tmp11, tmp19)
tmp21 = tl.where(tmp4, tmp17, tmp20)
tmp22 = 0.45
tmp23 = tmp21 * tmp22
tmp24 = -0.18799999999999994
tmp25 = tmp23 + tmp24
tmp26 = tmp0 == tmp3
tmp27 = tmp0 == tmp5
tmp29 = tl.where(tmp27, tmp11, tmp28)
tmp30 = tl.where(tmp26, tmp17, tmp29)
tmp31 = tl.where(tmp2, tmp25, tmp30)
tl.store(out_ptr0 + (y0 + 3 * x2 + 786432 * y1), tmp31, ymask)
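# The constants above are the folded form of Inception3.transform_input:
# 0.458 = 0.229/0.5, 0.448 = 0.224/0.5, 0.45 = 0.225/0.5, and -0.03, -0.088,
# -0.188 are (0.485-0.5)/0.5, (0.456-0.5)/0.5, (0.406-0.5)/0.5; the tl.where
# chains reproduce the per-channel semantics of the in-place slice
# assignments in the eager forward.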
@triton.jit
def triton_poi_fused_convolution_relu_18(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 8323200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_19(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 8193152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_20(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16386304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_21(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4064256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = xindex // 64 % 126
x2 = xindex // 8064 % 126
x3 = xindex // 1016064
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 32384 * x2 + 4096576 * x3), xmask
)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 32384 * x2 + 4096576 *
x3), xmask)
tmp3 = tl.load(in_ptr0 + (128 + x0 + 128 * x1 + 32384 * x2 + 4096576 *
x3), xmask)
tmp5 = tl.load(in_ptr0 + (16192 + x0 + 128 * x1 + 32384 * x2 + 4096576 *
x3), xmask)
tmp7 = tl.load(in_ptr0 + (16256 + x0 + 128 * x1 + 32384 * x2 + 4096576 *
x3), xmask)
tmp9 = tl.load(in_ptr0 + (16320 + x0 + 128 * x1 + 32384 * x2 + 4096576 *
x3), xmask)
tmp11 = tl.load(in_ptr0 + (32384 + x0 + 128 * x1 + 32384 * x2 + 4096576 *
x3), xmask)
tmp13 = tl.load(in_ptr0 + (32448 + x0 + 128 * x1 + 32384 * x2 + 4096576 *
x3), xmask)
tmp15 = tl.load(in_ptr0 + (32512 + x0 + 128 * x1 + 32384 * x2 + 4096576 *
x3), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tl.store(out_ptr0 + x4, tmp16, xmask)
tl.store(out_ptr1 + x4, tmp41, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_22(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 5080320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 80
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_23(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 192
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_24(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 2857728
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 192
x1 = xindex // 192 % 61
x2 = xindex // 11712 % 61
x3 = xindex // 714432
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 384 * x1 + 47616 * x2 + 2952192 * x3), xmask
)
tmp1 = tl.load(in_ptr0 + (192 + x0 + 384 * x1 + 47616 * x2 + 2952192 *
x3), xmask)
tmp3 = tl.load(in_ptr0 + (384 + x0 + 384 * x1 + 47616 * x2 + 2952192 *
x3), xmask)
tmp5 = tl.load(in_ptr0 + (23808 + x0 + 384 * x1 + 47616 * x2 + 2952192 *
x3), xmask)
tmp7 = tl.load(in_ptr0 + (24000 + x0 + 384 * x1 + 47616 * x2 + 2952192 *
x3), xmask)
tmp9 = tl.load(in_ptr0 + (24192 + x0 + 384 * x1 + 47616 * x2 + 2952192 *
x3), xmask)
tmp11 = tl.load(in_ptr0 + (47616 + x0 + 384 * x1 + 47616 * x2 + 2952192 *
x3), xmask)
tmp13 = tl.load(in_ptr0 + (47808 + x0 + 384 * x1 + 47616 * x2 + 2952192 *
x3), xmask)
tmp15 = tl.load(in_ptr0 + (48000 + x0 + 384 * x1 + 47616 * x2 + 2952192 *
x3), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tl.store(out_ptr0 + x4, tmp16, xmask)
tl.store(out_ptr1 + x4, tmp41, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_25(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 714432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 48
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_26(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 952576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_27(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1428864
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 96
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_avg_pool2d_28(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 2857728
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 11712 % 61
x1 = xindex // 192 % 61
x6 = xindex
tmp0 = -1 + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 61, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-11904 + x6), tmp10 & xmask, other=0.0)
tmp12 = x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-11712 + x6), tmp16 & xmask, other=0.0)
tmp18 = tmp17 + tmp11
tmp19 = 1 + x1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-11520 + x6), tmp23 & xmask, other=0.0)
tmp25 = tmp24 + tmp18
tmp26 = x2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-192 + x6), tmp30 & xmask, other=0.0)
tmp32 = tmp31 + tmp25
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + x6, tmp33 & xmask, other=0.0)
tmp35 = tmp34 + tmp32
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (192 + x6), tmp36 & xmask, other=0.0)
tmp38 = tmp37 + tmp35
tmp39 = 1 + x2
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (11520 + x6), tmp43 & xmask, other=0.0)
tmp45 = tmp44 + tmp38
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (11712 + x6), tmp46 & xmask, other=0.0)
tmp48 = tmp47 + tmp45
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (11904 + x6), tmp49 & xmask, other=0.0)
tmp51 = tmp50 + tmp48
tmp52 = 1 + -1 * x1 + -1 * x2 + x1 * x2 + (62 * (62 <= 2 + x1) + (2 +
x1) * (2 + x1 < 62)) * (62 * (62 <= 2 + x2) + (2 + x2) * (2 + x2 < 62)
) + -1 * x1 * (62 * (62 <= 2 + x2) + (2 + x2) * (2 + x2 < 62)
) + -1 * x2 * (62 * (62 <= 2 + x1) + (2 + x1) * (2 + x1 < 62)) + (
62 * (62 <= 2 + x1) + (2 + x1) * (2 + x1 < 62)) + (62 * (62 <= 2 +
x2) + (2 + x2) * (2 + x2 < 62))
tmp53 = tmp51 / tmp52
tl.store(out_ptr0 + x6, tmp53, xmask)
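# The tmp52 expression above is Inductor's generic clamped-window-area divisor
# for avg_pool2d(kernel_size=3, stride=1, padding=1); because the eager call
# uses the default count_include_pad=True, it evaluates to 9 (the full 3x3
# window) at every output position, including the borders.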
@triton.jit
def triton_poi_fused_cat_29(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 3810304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 256
x1 = xindex // 256
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (64 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 128, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tmp12 & tmp14
tmp16 = tl.load(in_ptr2 + (64 * x1 + (-64 + x0)), tmp15 & xmask,
eviction_policy='evict_last', other=0.0)
tmp17 = tl.load(in_ptr3 + (-64 + x0), tmp15 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp18 = tmp16 + tmp17
tmp19 = triton_helpers.maximum(tmp8, tmp18)
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp15, tmp19, tmp20)
tmp22 = tmp0 >= tmp13
tmp23 = tl.full([1], 224, tl.int64)
tmp24 = tmp0 < tmp23
tmp25 = tmp22 & tmp24
tmp26 = tl.load(in_ptr4 + (96 * x1 + (-128 + x0)), tmp25 & xmask,
eviction_policy='evict_last', other=0.0)
tmp27 = tl.load(in_ptr5 + (-128 + x0), tmp25 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp28 = tmp26 + tmp27
tmp29 = triton_helpers.maximum(tmp8, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp25, tmp29, tmp30)
tmp32 = tmp0 >= tmp23
tl.full([1], 256, tl.int64)
tmp35 = tl.load(in_ptr6 + (32 * x1 + (-224 + x0)), tmp32 & xmask,
eviction_policy='evict_last', other=0.0)
tmp36 = tl.load(in_ptr7 + (-224 + x0), tmp32 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp37 = tmp35 + tmp36
tmp38 = triton_helpers.maximum(tmp8, tmp37)
tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
tmp40 = tl.where(tmp32, tmp38, tmp39)
tmp41 = tl.where(tmp25, tmp31, tmp40)
tmp42 = tl.where(tmp15, tmp21, tmp41)
tmp43 = tl.where(tmp4, tmp11, tmp42)
tl.store(out_ptr0 + x2, tmp43, xmask)
@triton.jit
def triton_poi_fused_avg_pool2d_30(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 3810304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 15616 % 61
x1 = xindex // 256 % 61
x6 = xindex
tmp0 = -1 + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 61, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-15872 + x6), tmp10 & xmask, other=0.0)
tmp12 = x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-15616 + x6), tmp16 & xmask, other=0.0)
tmp18 = tmp17 + tmp11
tmp19 = 1 + x1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-15360 + x6), tmp23 & xmask, other=0.0)
tmp25 = tmp24 + tmp18
tmp26 = x2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-256 + x6), tmp30 & xmask, other=0.0)
tmp32 = tmp31 + tmp25
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + x6, tmp33 & xmask, other=0.0)
tmp35 = tmp34 + tmp32
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (256 + x6), tmp36 & xmask, other=0.0)
tmp38 = tmp37 + tmp35
tmp39 = 1 + x2
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (15360 + x6), tmp43 & xmask, other=0.0)
tmp45 = tmp44 + tmp38
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (15616 + x6), tmp46 & xmask, other=0.0)
tmp48 = tmp47 + tmp45
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (15872 + x6), tmp49 & xmask, other=0.0)
tmp51 = tmp50 + tmp48
tmp52 = 1 + -1 * x1 + -1 * x2 + x1 * x2 + (62 * (62 <= 2 + x1) + (2 +
x1) * (2 + x1 < 62)) * (62 * (62 <= 2 + x2) + (2 + x2) * (2 + x2 < 62)
) + -1 * x1 * (62 * (62 <= 2 + x2) + (2 + x2) * (2 + x2 < 62)
) + -1 * x2 * (62 * (62 <= 2 + x1) + (2 + x1) * (2 + x1 < 62)) + (
62 * (62 <= 2 + x1) + (2 + x1) * (2 + x1 < 62)) + (62 * (62 <= 2 +
x2) + (2 + x2) * (2 + x2 < 62))
tmp53 = tmp51 / tmp52
tl.store(out_ptr0 + x6, tmp53, xmask)
@triton.jit
def triton_poi_fused_cat_31(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4286592
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 288
x1 = xindex // 288
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (64 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 128, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tmp12 & tmp14
tmp16 = tl.load(in_ptr2 + (64 * x1 + (-64 + x0)), tmp15 & xmask,
eviction_policy='evict_last', other=0.0)
tmp17 = tl.load(in_ptr3 + (-64 + x0), tmp15 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp18 = tmp16 + tmp17
tmp19 = triton_helpers.maximum(tmp8, tmp18)
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp15, tmp19, tmp20)
tmp22 = tmp0 >= tmp13
tmp23 = tl.full([1], 224, tl.int64)
tmp24 = tmp0 < tmp23
tmp25 = tmp22 & tmp24
tmp26 = tl.load(in_ptr4 + (96 * x1 + (-128 + x0)), tmp25 & xmask,
eviction_policy='evict_last', other=0.0)
tmp27 = tl.load(in_ptr5 + (-128 + x0), tmp25 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp28 = tmp26 + tmp27
tmp29 = triton_helpers.maximum(tmp8, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp25, tmp29, tmp30)
tmp32 = tmp0 >= tmp23
tl.full([1], 288, tl.int64)
tmp35 = tl.load(in_ptr6 + (64 * x1 + (-224 + x0)), tmp32 & xmask,
eviction_policy='evict_last', other=0.0)
tmp36 = tl.load(in_ptr7 + (-224 + x0), tmp32 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp37 = tmp35 + tmp36
tmp38 = triton_helpers.maximum(tmp8, tmp37)
tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
tmp40 = tl.where(tmp32, tmp38, tmp39)
tmp41 = tl.where(tmp25, tmp31, tmp40)
tmp42 = tl.where(tmp15, tmp21, tmp41)
tmp43 = tl.where(tmp4, tmp11, tmp42)
tl.store(out_ptr0 + x2, tmp43, xmask)
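# NOTE: triton_poi_fused_avg_pool2d_32 below is the 288-channel analogue of
# triton_poi_fused_avg_pool2d_30 above (row stride 17568 = 61*288).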
@triton.jit
def triton_poi_fused_avg_pool2d_32(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 4286592
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 17568 % 61
x1 = xindex // 288 % 61
x6 = xindex
tmp0 = -1 + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 61, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-17856 + x6), tmp10 & xmask, other=0.0)
tmp12 = x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-17568 + x6), tmp16 & xmask, other=0.0)
tmp18 = tmp17 + tmp11
tmp19 = 1 + x1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-17280 + x6), tmp23 & xmask, other=0.0)
tmp25 = tmp24 + tmp18
tmp26 = x2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-288 + x6), tmp30 & xmask, other=0.0)
tmp32 = tmp31 + tmp25
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + x6, tmp33 & xmask, other=0.0)
tmp35 = tmp34 + tmp32
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (288 + x6), tmp36 & xmask, other=0.0)
tmp38 = tmp37 + tmp35
tmp39 = 1 + x2
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (17280 + x6), tmp43 & xmask, other=0.0)
tmp45 = tmp44 + tmp38
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (17568 + x6), tmp46 & xmask, other=0.0)
tmp48 = tmp47 + tmp45
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (17856 + x6), tmp49 & xmask, other=0.0)
tmp51 = tmp50 + tmp48
tmp52 = 1 + -1 * x1 + -1 * x2 + x1 * x2 + (62 * (62 <= 2 + x1) + (2 +
x1) * (2 + x1 < 62)) * (62 * (62 <= 2 + x2) + (2 + x2) * (2 + x2 < 62)
) + -1 * x1 * (62 * (62 <= 2 + x2) + (2 + x2) * (2 + x2 < 62)
) + -1 * x2 * (62 * (62 <= 2 + x1) + (2 + x1) * (2 + x1 < 62)) + (
62 * (62 <= 2 + x1) + (2 + x1) * (2 + x1 < 62)) + (62 * (62 <= 2 +
x2) + (2 + x2) * (2 + x2 < 62))
tmp53 = tmp51 / tmp52
tl.store(out_ptr0 + x6, tmp53, xmask)
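# NOTE: the kernel below appears to be a 3x3, stride-2 max pool over a
# channels-last (4, 288, 61, 61) input (column stride 576 = 2*288, row stride
# 35136 = 2*61*288). The pooled values go into a buffer whose per-batch stride
# (691200 = 768*900) suggests a 288-channel slice of the downstream
# 768-channel concat output, and an int8 argmax index in [0, 8] is stored per
# output element for use by max_pool2d backward.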
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_33(in_ptr0, out_ptr0, out_ptr1,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 3600
xnumel = 288
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex
y0 = yindex % 30
y1 = yindex // 30 % 30
y2 = yindex // 900
y4 = yindex % 900
y5 = yindex
tmp0 = tl.load(in_ptr0 + (x3 + 576 * y0 + 35136 * y1 + 1071648 * y2),
xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (288 + x3 + 576 * y0 + 35136 * y1 + 1071648 *
y2), xmask & ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (576 + x3 + 576 * y0 + 35136 * y1 + 1071648 *
y2), xmask & ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (17568 + x3 + 576 * y0 + 35136 * y1 + 1071648 *
y2), xmask & ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (17856 + x3 + 576 * y0 + 35136 * y1 + 1071648 *
y2), xmask & ymask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (18144 + x3 + 576 * y0 + 35136 * y1 + 1071648 *
y2), xmask & ymask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (35136 + x3 + 576 * y0 + 35136 * y1 + 1071648 *
y2), xmask & ymask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (35424 + x3 + 576 * y0 + 35136 * y1 + 1071648 *
y2), xmask & ymask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (35712 + x3 + 576 * y0 + 35136 * y1 + 1071648 *
y2), xmask & ymask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1, 1], 1, tl.int8)
tmp19 = tl.full([1, 1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1, 1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1, 1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1, 1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1, 1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1, 1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1, 1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1, 1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tl.store(out_ptr0 + (y4 + 900 * x3 + 691200 * y2), tmp16, xmask & ymask)
tl.store(out_ptr1 + (x3 + 288 * y5), tmp41, xmask & ymask)
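# NOTE: the two *_threshold_backward kernels below appear to fuse the bias add
# and ReLU of a convolution output with the boolean mask (relu_out <= 0) that
# ReLU backward needs: out_ptr0 receives the forward activation (written as a
# slice of a 768-channel concat buffer in NCHW order), out_ptr1 the
# channels-last mask.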
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_34(in_ptr0,
in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr,
XBLOCK: tl.constexpr):
ynumel = 1536
xnumel = 900
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 384
y1 = yindex // 384
tmp0 = tl.load(in_ptr0 + (y0 + 384 * x2 + 345600 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + 900 * y0 + 691200 * y1), tmp4, xmask & ymask)
tl.store(out_ptr1 + (y0 + 384 * x2 + 345600 * y1), tmp6, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_35(in_ptr0,
in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr,
XBLOCK: tl.constexpr):
ynumel = 384
xnumel = 900
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 96
y1 = yindex // 96
tmp0 = tl.load(in_ptr0 + (y0 + 96 * x2 + 86400 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + 900 * y0 + 691200 * y1), tmp4, xmask & ymask)
tl.store(out_ptr1 + (y0 + 96 * x2 + 86400 * y1), tmp6, xmask & ymask)
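# NOTE: triton_poi_fused_cat_36 below does no arithmetic; it appears to copy
# the already-concatenated (4, 768, 30, 30) NCHW buffer into a channels-last
# layout so the following convolutions can consume it directly.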
@triton.jit
def triton_poi_fused_cat_36(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 900
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 768
y1 = yindex // 768
tmp0 = tl.load(in_ptr0 + (x2 + 900 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 768 * x2 + 691200 * y1), tmp0, xmask)
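# NOTE: the *_convolution_relu_* kernels below add the convolution bias and
# apply ReLU in place. Kernels without an explicit `xnumel` bound (the bare
# tl.full([XBLOCK], True, tl.int1) placeholder instead of a mask) presumably
# rely on a launch grid that exactly covers the element count, so no bounds
# check is required.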
@triton.jit
def triton_poi_fused_convolution_relu_37(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_avg_pool2d_38(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 23040 % 30
x1 = xindex // 768 % 30
x6 = xindex
tmp0 = -1 + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 30, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-23808 + x6), tmp10, other=0.0)
tmp12 = x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-23040 + x6), tmp16, other=0.0)
tmp18 = tmp17 + tmp11
tmp19 = 1 + x1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-22272 + x6), tmp23, other=0.0)
tmp25 = tmp24 + tmp18
tmp26 = x2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-768 + x6), tmp30, other=0.0)
tmp32 = tmp31 + tmp25
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + x6, tmp33, other=0.0)
tmp35 = tmp34 + tmp32
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (768 + x6), tmp36, other=0.0)
tmp38 = tmp37 + tmp35
tmp39 = 1 + x2
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (22272 + x6), tmp43, other=0.0)
tmp45 = tmp44 + tmp38
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (23040 + x6), tmp46, other=0.0)
tmp48 = tmp47 + tmp45
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (23808 + x6), tmp49, other=0.0)
tmp51 = tmp50 + tmp48
tmp52 = 1 + -1 * x1 + -1 * x2 + x1 * x2 + (31 * (31 <= 2 + x1) + (2 +
x1) * (2 + x1 < 31)) * (31 * (31 <= 2 + x2) + (2 + x2) * (2 + x2 < 31)
) + -1 * x1 * (31 * (31 <= 2 + x2) + (2 + x2) * (2 + x2 < 31)
) + -1 * x2 * (31 * (31 <= 2 + x1) + (2 + x1) * (2 + x1 < 31)) + (
31 * (31 <= 2 + x1) + (2 + x1) * (2 + x1 < 31)) + (31 * (31 <= 2 +
x2) + (2 + x2) * (2 + x2 < 31))
tmp53 = tmp51 / tmp52
tl.store(out_ptr0 + x6, tmp53, None)
@triton.jit
def triton_poi_fused_cat_39(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 768
x1 = xindex // 768
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 192, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (192 * x1 + x0), tmp4, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + x0, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 384, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tmp12 & tmp14
tmp16 = tl.load(in_ptr2 + (192 * x1 + (-192 + x0)), tmp15,
eviction_policy='evict_last', other=0.0)
tmp17 = tl.load(in_ptr3 + (-192 + x0), tmp15, eviction_policy=
'evict_last', other=0.0)
tmp18 = tmp16 + tmp17
tmp19 = triton_helpers.maximum(tmp8, tmp18)
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp15, tmp19, tmp20)
tmp22 = tmp0 >= tmp13
tmp23 = tl.full([1], 576, tl.int64)
tmp24 = tmp0 < tmp23
tmp25 = tmp22 & tmp24
tmp26 = tl.load(in_ptr4 + (192 * x1 + (-384 + x0)), tmp25,
eviction_policy='evict_last', other=0.0)
tmp27 = tl.load(in_ptr5 + (-384 + x0), tmp25, eviction_policy=
'evict_last', other=0.0)
tmp28 = tmp26 + tmp27
tmp29 = triton_helpers.maximum(tmp8, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp25, tmp29, tmp30)
tmp32 = tmp0 >= tmp23
tl.full([1], 768, tl.int64)
tmp35 = tl.load(in_ptr6 + (192 * x1 + (-576 + x0)), tmp32,
eviction_policy='evict_last', other=0.0)
tmp36 = tl.load(in_ptr7 + (-576 + x0), tmp32, eviction_policy=
'evict_last', other=0.0)
tmp37 = tmp35 + tmp36
tmp38 = triton_helpers.maximum(tmp8, tmp37)
tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
tmp40 = tl.where(tmp32, tmp38, tmp39)
tmp41 = tl.where(tmp25, tmp31, tmp40)
tmp42 = tl.where(tmp15, tmp21, tmp41)
tmp43 = tl.where(tmp4, tmp11, tmp42)
tl.store(out_ptr0 + x2, tmp43, None)
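# NOTE: triton_poi_fused_convolution_relu_40/41 below are the same in-place
# bias + ReLU pattern for the 160- and 192-channel branch activations
# (576000 = 4*160*30*30 and 691200 = 4*192*30*30 elements respectively).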
@triton.jit
def triton_poi_fused_convolution_relu_40(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 576000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 160
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_41(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 691200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 192
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_42(in_ptr0, out_ptr0, out_ptr1,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 784
xnumel = 768
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex
y0 = yindex % 14
y1 = yindex // 14 % 14
y2 = yindex // 196
y4 = yindex % 196
y5 = yindex
tmp0 = tl.load(in_ptr0 + (x3 + 1536 * y0 + 46080 * y1 + 691200 * y2),
xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (768 + x3 + 1536 * y0 + 46080 * y1 + 691200 *
y2), xmask & ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1536 + x3 + 1536 * y0 + 46080 * y1 + 691200 *
y2), xmask & ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (23040 + x3 + 1536 * y0 + 46080 * y1 + 691200 *
y2), xmask & ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (23808 + x3 + 1536 * y0 + 46080 * y1 + 691200 *
y2), xmask & ymask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (24576 + x3 + 1536 * y0 + 46080 * y1 + 691200 *
y2), xmask & ymask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (46080 + x3 + 1536 * y0 + 46080 * y1 + 691200 *
y2), xmask & ymask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (46848 + x3 + 1536 * y0 + 46080 * y1 + 691200 *
y2), xmask & ymask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (47616 + x3 + 1536 * y0 + 46080 * y1 + 691200 *
y2), xmask & ymask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1, 1], 1, tl.int8)
tmp19 = tl.full([1, 1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1, 1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1, 1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1, 1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1, 1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1, 1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1, 1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1, 1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tl.store(out_ptr0 + (y4 + 196 * x3 + 250880 * y2), tmp16, xmask & ymask)
tl.store(out_ptr1 + (x3 + 768 * y5), tmp41, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_43(in_ptr0,
in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr,
XBLOCK: tl.constexpr):
ynumel = 1280
xnumel = 196
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 320
y1 = yindex // 320
tmp0 = tl.load(in_ptr0 + (y0 + 320 * x2 + 62720 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + 196 * y0 + 250880 * y1), tmp4, xmask & ymask)
tl.store(out_ptr1 + (y0 + 320 * x2 + 62720 * y1), tmp6, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_44(in_ptr0,
in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr,
XBLOCK: tl.constexpr):
ynumel = 768
xnumel = 196
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 192
y1 = yindex // 192
tmp0 = tl.load(in_ptr0 + (y0 + 192 * x2 + 37632 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + 196 * y0 + 250880 * y1), tmp4, xmask & ymask)
tl.store(out_ptr1 + (y0 + 192 * x2 + 37632 * y1), tmp6, xmask & ymask)
@triton.jit
def triton_poi_fused_cat_45(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 196
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 1280
y1 = yindex // 1280
tmp0 = tl.load(in_ptr0 + (x2 + 196 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 1280 * x2 + 250880 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_46(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 384
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_47(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 351232
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 448
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_avg_pool2d_48(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 17920 % 14
x1 = xindex // 1280 % 14
x6 = xindex
tmp0 = -1 + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 14, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-19200 + x6), tmp10, other=0.0)
tmp12 = x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-17920 + x6), tmp16, other=0.0)
tmp18 = tmp17 + tmp11
tmp19 = 1 + x1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-16640 + x6), tmp23, other=0.0)
tmp25 = tmp24 + tmp18
tmp26 = x2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-1280 + x6), tmp30, other=0.0)
tmp32 = tmp31 + tmp25
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + x6, tmp33, other=0.0)
tmp35 = tmp34 + tmp32
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1280 + x6), tmp36, other=0.0)
tmp38 = tmp37 + tmp35
tmp39 = 1 + x2
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (16640 + x6), tmp43, other=0.0)
tmp45 = tmp44 + tmp38
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (17920 + x6), tmp46, other=0.0)
tmp48 = tmp47 + tmp45
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (19200 + x6), tmp49, other=0.0)
tmp51 = tmp50 + tmp48
tmp52 = 1 + -1 * x1 + -1 * x2 + x1 * x2 + (15 * (15 <= 2 + x1) + (2 +
x1) * (2 + x1 < 15)) * (15 * (15 <= 2 + x2) + (2 + x2) * (2 + x2 < 15)
) + -1 * x1 * (15 * (15 <= 2 + x2) + (2 + x2) * (2 + x2 < 15)
) + -1 * x2 * (15 * (15 <= 2 + x1) + (2 + x1) * (2 + x1 < 15)) + (
15 * (15 <= 2 + x1) + (2 + x1) * (2 + x1 < 15)) + (15 * (15 <= 2 +
x2) + (2 + x2) * (2 + x2 < 15))
tmp53 = tmp51 / tmp52
tl.store(out_ptr0 + x6, tmp53, None)
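# NOTE: triton_poi_fused_cat_49 below appears to fuse a nested concatenation
# into one kernel: channels [0, 320) come from a 1x1 branch, [320, 1088) and
# [1088, 1856) are each themselves the cat of two 384-channel outputs, and
# [1856, 2048) is a 192-channel pool-projection branch, giving a 2048-channel
# layout consistent with an Inception-E style block; every leaf gets its bias
# add and ReLU applied inside the same kernel.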
@triton.jit
def triton_poi_fused_cat_49(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 2048
x1 = xindex // 2048
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 320, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (320 * x1 + x0), tmp4, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + x0, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 1088, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tmp12 & tmp14
tmp16 = -320 + x0
tmp18 = tl.full([1], 384, tl.int64)
tmp19 = tmp16 < tmp18
tmp20 = tmp19 & tmp15
tmp21 = tl.load(in_ptr2 + (384 * x1 + (-320 + x0)), tmp20,
eviction_policy='evict_last', other=0.0)
tmp22 = tl.load(in_ptr3 + (-320 + x0), tmp20, eviction_policy=
'evict_last', other=0.0)
tmp23 = tmp21 + tmp22
tmp24 = triton_helpers.maximum(tmp8, tmp23)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp20, tmp24, tmp25)
tmp27 = tmp16 >= tmp18
tl.full([1], 768, tl.int64)
tmp30 = tmp27 & tmp15
tmp31 = tl.load(in_ptr4 + (384 * x1 + (-384 + (-320 + x0))), tmp30,
eviction_policy='evict_last', other=0.0)
tmp32 = tl.load(in_ptr5 + (-384 + (-320 + x0)), tmp30, eviction_policy=
'evict_last', other=0.0)
tmp33 = tmp31 + tmp32
tmp34 = triton_helpers.maximum(tmp8, tmp33)
tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype)
tmp36 = tl.where(tmp30, tmp34, tmp35)
tmp37 = tl.where(tmp19, tmp26, tmp36)
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp15, tmp37, tmp38)
tmp40 = tmp0 >= tmp13
tmp41 = tl.full([1], 1856, tl.int64)
tmp42 = tmp0 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = -1088 + x0
tmp46 = tmp44 < tmp18
tmp47 = tmp46 & tmp43
tmp48 = tl.load(in_ptr6 + (384 * x1 + (-1088 + x0)), tmp47,
eviction_policy='evict_last', other=0.0)
tmp49 = tl.load(in_ptr7 + (-1088 + x0), tmp47, eviction_policy=
'evict_last', other=0.0)
tmp50 = tmp48 + tmp49
tmp51 = triton_helpers.maximum(tmp8, tmp50)
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp47, tmp51, tmp52)
tmp54 = tmp44 >= tmp18
tmp56 = tmp54 & tmp43
tmp57 = tl.load(in_ptr8 + (384 * x1 + (-384 + (-1088 + x0))), tmp56,
eviction_policy='evict_last', other=0.0)
tmp58 = tl.load(in_ptr9 + (-384 + (-1088 + x0)), tmp56, eviction_policy
='evict_last', other=0.0)
tmp59 = tmp57 + tmp58
tmp60 = triton_helpers.maximum(tmp8, tmp59)
tmp61 = tl.full(tmp60.shape, 0.0, tmp60.dtype)
tmp62 = tl.where(tmp56, tmp60, tmp61)
tmp63 = tl.where(tmp46, tmp53, tmp62)
tmp64 = tl.full(tmp63.shape, 0.0, tmp63.dtype)
tmp65 = tl.where(tmp43, tmp63, tmp64)
tmp66 = tmp0 >= tmp41
tl.full([1], 2048, tl.int64)
tmp69 = tl.load(in_ptr10 + (192 * x1 + (-1856 + x0)), tmp66,
eviction_policy='evict_last', other=0.0)
tmp70 = tl.load(in_ptr11 + (-1856 + x0), tmp66, eviction_policy=
'evict_last', other=0.0)
tmp71 = tmp69 + tmp70
tmp72 = triton_helpers.maximum(tmp8, tmp71)
tmp73 = tl.full(tmp72.shape, 0.0, tmp72.dtype)
tmp74 = tl.where(tmp66, tmp72, tmp73)
tmp75 = tl.where(tmp43, tmp65, tmp74)
tmp76 = tl.where(tmp15, tmp39, tmp75)
tmp77 = tl.where(tmp4, tmp11, tmp76)
tl.store(out_ptr0 + x2, tmp77, None)
@triton.jit
def triton_poi_fused_avg_pool2d_50(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 28672 % 14
x1 = xindex // 2048 % 14
x6 = xindex
tmp0 = -1 + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 14, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-30720 + x6), tmp10, other=0.0)
tmp12 = x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-28672 + x6), tmp16, other=0.0)
tmp18 = tmp17 + tmp11
tmp19 = 1 + x1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-26624 + x6), tmp23, other=0.0)
tmp25 = tmp24 + tmp18
tmp26 = x2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-2048 + x6), tmp30, other=0.0)
tmp32 = tmp31 + tmp25
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + x6, tmp33, other=0.0)
tmp35 = tmp34 + tmp32
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (2048 + x6), tmp36, other=0.0)
tmp38 = tmp37 + tmp35
tmp39 = 1 + x2
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (26624 + x6), tmp43, other=0.0)
tmp45 = tmp44 + tmp38
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (28672 + x6), tmp46, other=0.0)
tmp48 = tmp47 + tmp45
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (30720 + x6), tmp49, other=0.0)
tmp51 = tmp50 + tmp48
tmp52 = 1 + -1 * x1 + -1 * x2 + x1 * x2 + (15 * (15 <= 2 + x1) + (2 +
x1) * (2 + x1 < 15)) * (15 * (15 <= 2 + x2) + (2 + x2) * (2 + x2 < 15)
) + -1 * x1 * (15 * (15 <= 2 + x2) + (2 + x2) * (2 + x2 < 15)
) + -1 * x2 * (15 * (15 <= 2 + x1) + (2 + x1) * (2 + x1 < 15)) + (
15 * (15 <= 2 + x1) + (2 + x1) * (2 + x1 < 15)) + (15 * (15 <= 2 +
x2) + (2 + x2) * (2 + x2 < 15))
tmp53 = tmp51 / tmp52
tl.store(out_ptr0 + x6, tmp53, None)
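# NOTE: kernels *_threshold_backward_51 through _57 below only materialise the
# boolean (activation <= 0) masks needed by ReLU backward; the corresponding
# forward values appear to be recomputed inside the fused cat kernels above
# rather than stored separately, so only the masks have to persist.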
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_51(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 150528
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 192
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_52(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 384
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_53(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 250880
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 320
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_54(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 691200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 192
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_55(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 952576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_56(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1428864
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 96
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_57(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 476288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
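# NOTE: call() below is the generated driver. It unpacks the 191 inputs (the
# (4, 3, 512, 512) image batch, convolution weights/biases, and a 1000-way
# linear head), checks their shapes and strides, re-lays the conv weights into
# channels-last with the copy kernels defined earlier, and then interleaves
# extern_kernels.convolution calls with the fused Triton kernels above. The
# parameter shapes (32/64/80/192 stem, 768- and 2048-channel mixed blocks) are
# consistent with an Inception-v3 style backbone, though that is an inference
# from the shapes rather than something stated in the code.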
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41, primals_42,
primals_43, primals_44, primals_45, primals_46, primals_47,
primals_48, primals_49, primals_50, primals_51, primals_52,
primals_53, primals_54, primals_55, primals_56, primals_57,
primals_58, primals_59, primals_60, primals_61, primals_62,
primals_63, primals_64, primals_65, primals_66, primals_67,
primals_68, primals_69, primals_70, primals_71, primals_72,
primals_73, primals_74, primals_75, primals_76, primals_77,
primals_78, primals_79, primals_80, primals_81, primals_82,
primals_83, primals_84, primals_85, primals_86, primals_87,
primals_88, primals_89, primals_90, primals_91, primals_92,
primals_93, primals_94, primals_95, primals_96, primals_97,
primals_98, primals_99, primals_100, primals_101, primals_102,
primals_103, primals_104, primals_105, primals_106, primals_107,
primals_108, primals_109, primals_110, primals_111, primals_112,
primals_113, primals_114, primals_115, primals_116, primals_117,
primals_118, primals_119, primals_120, primals_121, primals_122,
primals_123, primals_124, primals_125, primals_126, primals_127,
primals_128, primals_129, primals_130, primals_131, primals_132,
primals_133, primals_134, primals_135, primals_136, primals_137,
primals_138, primals_139, primals_140, primals_141, primals_142,
primals_143, primals_144, primals_145, primals_146, primals_147,
primals_148, primals_149, primals_150, primals_151, primals_152,
primals_153, primals_154, primals_155, primals_156, primals_157,
primals_158, primals_159, primals_160, primals_161, primals_162,
primals_163, primals_164, primals_165, primals_166, primals_167,
primals_168, primals_169, primals_170, primals_171, primals_172,
primals_173, primals_174, primals_175, primals_176, primals_177,
primals_178, primals_179, primals_180, primals_181, primals_182,
primals_183, primals_184, primals_185, primals_186, primals_187,
primals_188, primals_189, primals_190, primals_191) = args
args.clear()
assert_size_stride(primals_1, (4, 3, 512, 512), (786432, 262144, 512, 1))
assert_size_stride(primals_2, (32, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_3, (32,), (1,))
assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (80, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_9, (80,), (1,))
assert_size_stride(primals_10, (192, 80, 3, 3), (720, 9, 3, 1))
assert_size_stride(primals_11, (192,), (1,))
assert_size_stride(primals_12, (64, 192, 1, 1), (192, 1, 1, 1))
assert_size_stride(primals_13, (64,), (1,))
assert_size_stride(primals_14, (48, 192, 1, 1), (192, 1, 1, 1))
assert_size_stride(primals_15, (48,), (1,))
assert_size_stride(primals_16, (64, 48, 5, 5), (1200, 25, 5, 1))
assert_size_stride(primals_17, (64,), (1,))
assert_size_stride(primals_18, (64, 192, 1, 1), (192, 1, 1, 1))
assert_size_stride(primals_19, (64,), (1,))
assert_size_stride(primals_20, (96, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_21, (96,), (1,))
assert_size_stride(primals_22, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_23, (96,), (1,))
assert_size_stride(primals_24, (32, 192, 1, 1), (192, 1, 1, 1))
assert_size_stride(primals_25, (32,), (1,))
assert_size_stride(primals_26, (64, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_27, (64,), (1,))
assert_size_stride(primals_28, (48, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_29, (48,), (1,))
assert_size_stride(primals_30, (64, 48, 5, 5), (1200, 25, 5, 1))
assert_size_stride(primals_31, (64,), (1,))
assert_size_stride(primals_32, (64, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_33, (64,), (1,))
assert_size_stride(primals_34, (96, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_35, (96,), (1,))
assert_size_stride(primals_36, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_37, (96,), (1,))
assert_size_stride(primals_38, (64, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_39, (64,), (1,))
assert_size_stride(primals_40, (64, 288, 1, 1), (288, 1, 1, 1))
assert_size_stride(primals_41, (64,), (1,))
assert_size_stride(primals_42, (48, 288, 1, 1), (288, 1, 1, 1))
assert_size_stride(primals_43, (48,), (1,))
assert_size_stride(primals_44, (64, 48, 5, 5), (1200, 25, 5, 1))
assert_size_stride(primals_45, (64,), (1,))
assert_size_stride(primals_46, (64, 288, 1, 1), (288, 1, 1, 1))
assert_size_stride(primals_47, (64,), (1,))
assert_size_stride(primals_48, (96, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_49, (96,), (1,))
assert_size_stride(primals_50, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_51, (96,), (1,))
assert_size_stride(primals_52, (64, 288, 1, 1), (288, 1, 1, 1))
assert_size_stride(primals_53, (64,), (1,))
assert_size_stride(primals_54, (384, 288, 3, 3), (2592, 9, 3, 1))
assert_size_stride(primals_55, (384,), (1,))
assert_size_stride(primals_56, (64, 288, 1, 1), (288, 1, 1, 1))
assert_size_stride(primals_57, (64,), (1,))
assert_size_stride(primals_58, (96, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_59, (96,), (1,))
assert_size_stride(primals_60, (96, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_61, (96,), (1,))
assert_size_stride(primals_62, (192, 768, 1, 1), (768, 1, 1, 1))
assert_size_stride(primals_63, (192,), (1,))
assert_size_stride(primals_64, (128, 768, 1, 1), (768, 1, 1, 1))
assert_size_stride(primals_65, (128,), (1,))
assert_size_stride(primals_66, (128, 128, 1, 7), (896, 7, 7, 1))
assert_size_stride(primals_67, (128,), (1,))
assert_size_stride(primals_68, (192, 128, 7, 1), (896, 7, 1, 1))
assert_size_stride(primals_69, (192,), (1,))
assert_size_stride(primals_70, (128, 768, 1, 1), (768, 1, 1, 1))
assert_size_stride(primals_71, (128,), (1,))
assert_size_stride(primals_72, (128, 128, 7, 1), (896, 7, 1, 1))
assert_size_stride(primals_73, (128,), (1,))
assert_size_stride(primals_74, (128, 128, 1, 7), (896, 7, 7, 1))
assert_size_stride(primals_75, (128,), (1,))
assert_size_stride(primals_76, (128, 128, 7, 1), (896, 7, 1, 1))
assert_size_stride(primals_77, (128,), (1,))
assert_size_stride(primals_78, (192, 128, 1, 7), (896, 7, 7, 1))
assert_size_stride(primals_79, (192,), (1,))
assert_size_stride(primals_80, (192, 768, 1, 1), (768, 1, 1, 1))
assert_size_stride(primals_81, (192,), (1,))
assert_size_stride(primals_82, (192, 768, 1, 1), (768, 1, 1, 1))
assert_size_stride(primals_83, (192,), (1,))
assert_size_stride(primals_84, (160, 768, 1, 1), (768, 1, 1, 1))
assert_size_stride(primals_85, (160,), (1,))
assert_size_stride(primals_86, (160, 160, 1, 7), (1120, 7, 7, 1))
assert_size_stride(primals_87, (160,), (1,))
assert_size_stride(primals_88, (192, 160, 7, 1), (1120, 7, 1, 1))
assert_size_stride(primals_89, (192,), (1,))
assert_size_stride(primals_90, (160, 768, 1, 1), (768, 1, 1, 1))
assert_size_stride(primals_91, (160,), (1,))
assert_size_stride(primals_92, (160, 160, 7, 1), (1120, 7, 1, 1))
assert_size_stride(primals_93, (160,), (1,))
assert_size_stride(primals_94, (160, 160, 1, 7), (1120, 7, 7, 1))
assert_size_stride(primals_95, (160,), (1,))
assert_size_stride(primals_96, (160, 160, 7, 1), (1120, 7, 1, 1))
assert_size_stride(primals_97, (160,), (1,))
assert_size_stride(primals_98, (192, 160, 1, 7), (1120, 7, 7, 1))
assert_size_stride(primals_99, (192,), (1,))
assert_size_stride(primals_100, (192, 768, 1, 1), (768, 1, 1, 1))
assert_size_stride(primals_101, (192,), (1,))
assert_size_stride(primals_102, (192, 768, 1, 1), (768, 1, 1, 1))
assert_size_stride(primals_103, (192,), (1,))
assert_size_stride(primals_104, (160, 768, 1, 1), (768, 1, 1, 1))
assert_size_stride(primals_105, (160,), (1,))
assert_size_stride(primals_106, (160, 160, 1, 7), (1120, 7, 7, 1))
assert_size_stride(primals_107, (160,), (1,))
assert_size_stride(primals_108, (192, 160, 7, 1), (1120, 7, 1, 1))
assert_size_stride(primals_109, (192,), (1,))
assert_size_stride(primals_110, (160, 768, 1, 1), (768, 1, 1, 1))
assert_size_stride(primals_111, (160,), (1,))
assert_size_stride(primals_112, (160, 160, 7, 1), (1120, 7, 1, 1))
assert_size_stride(primals_113, (160,), (1,))
assert_size_stride(primals_114, (160, 160, 1, 7), (1120, 7, 7, 1))
assert_size_stride(primals_115, (160,), (1,))
assert_size_stride(primals_116, (160, 160, 7, 1), (1120, 7, 1, 1))
assert_size_stride(primals_117, (160,), (1,))
assert_size_stride(primals_118, (192, 160, 1, 7), (1120, 7, 7, 1))
assert_size_stride(primals_119, (192,), (1,))
assert_size_stride(primals_120, (192, 768, 1, 1), (768, 1, 1, 1))
assert_size_stride(primals_121, (192,), (1,))
assert_size_stride(primals_122, (192, 768, 1, 1), (768, 1, 1, 1))
assert_size_stride(primals_123, (192,), (1,))
assert_size_stride(primals_124, (192, 768, 1, 1), (768, 1, 1, 1))
assert_size_stride(primals_125, (192,), (1,))
assert_size_stride(primals_126, (192, 192, 1, 7), (1344, 7, 7, 1))
assert_size_stride(primals_127, (192,), (1,))
assert_size_stride(primals_128, (192, 192, 7, 1), (1344, 7, 1, 1))
assert_size_stride(primals_129, (192,), (1,))
assert_size_stride(primals_130, (192, 768, 1, 1), (768, 1, 1, 1))
assert_size_stride(primals_131, (192,), (1,))
assert_size_stride(primals_132, (192, 192, 7, 1), (1344, 7, 1, 1))
assert_size_stride(primals_133, (192,), (1,))
assert_size_stride(primals_134, (192, 192, 1, 7), (1344, 7, 7, 1))
assert_size_stride(primals_135, (192,), (1,))
assert_size_stride(primals_136, (192, 192, 7, 1), (1344, 7, 1, 1))
assert_size_stride(primals_137, (192,), (1,))
assert_size_stride(primals_138, (192, 192, 1, 7), (1344, 7, 7, 1))
assert_size_stride(primals_139, (192,), (1,))
assert_size_stride(primals_140, (192, 768, 1, 1), (768, 1, 1, 1))
assert_size_stride(primals_141, (192,), (1,))
assert_size_stride(primals_142, (192, 768, 1, 1), (768, 1, 1, 1))
assert_size_stride(primals_143, (192,), (1,))
assert_size_stride(primals_144, (320, 192, 3, 3), (1728, 9, 3, 1))
assert_size_stride(primals_145, (320,), (1,))
assert_size_stride(primals_146, (192, 768, 1, 1), (768, 1, 1, 1))
assert_size_stride(primals_147, (192,), (1,))
assert_size_stride(primals_148, (192, 192, 1, 7), (1344, 7, 7, 1))
assert_size_stride(primals_149, (192,), (1,))
assert_size_stride(primals_150, (192, 192, 7, 1), (1344, 7, 1, 1))
assert_size_stride(primals_151, (192,), (1,))
assert_size_stride(primals_152, (192, 192, 3, 3), (1728, 9, 3, 1))
assert_size_stride(primals_153, (192,), (1,))
assert_size_stride(primals_154, (320, 1280, 1, 1), (1280, 1, 1, 1))
assert_size_stride(primals_155, (320,), (1,))
assert_size_stride(primals_156, (384, 1280, 1, 1), (1280, 1, 1, 1))
assert_size_stride(primals_157, (384,), (1,))
assert_size_stride(primals_158, (384, 384, 1, 3), (1152, 3, 3, 1))
assert_size_stride(primals_159, (384,), (1,))
assert_size_stride(primals_160, (384, 384, 3, 1), (1152, 3, 1, 1))
assert_size_stride(primals_161, (384,), (1,))
assert_size_stride(primals_162, (448, 1280, 1, 1), (1280, 1, 1, 1))
assert_size_stride(primals_163, (448,), (1,))
assert_size_stride(primals_164, (384, 448, 3, 3), (4032, 9, 3, 1))
assert_size_stride(primals_165, (384,), (1,))
assert_size_stride(primals_166, (384, 384, 1, 3), (1152, 3, 3, 1))
assert_size_stride(primals_167, (384,), (1,))
assert_size_stride(primals_168, (384, 384, 3, 1), (1152, 3, 1, 1))
assert_size_stride(primals_169, (384,), (1,))
assert_size_stride(primals_170, (192, 1280, 1, 1), (1280, 1, 1, 1))
assert_size_stride(primals_171, (192,), (1,))
assert_size_stride(primals_172, (320, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_173, (320,), (1,))
assert_size_stride(primals_174, (384, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_175, (384,), (1,))
assert_size_stride(primals_176, (384, 384, 1, 3), (1152, 3, 3, 1))
assert_size_stride(primals_177, (384,), (1,))
assert_size_stride(primals_178, (384, 384, 3, 1), (1152, 3, 1, 1))
assert_size_stride(primals_179, (384,), (1,))
assert_size_stride(primals_180, (448, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_181, (448,), (1,))
assert_size_stride(primals_182, (384, 448, 3, 3), (4032, 9, 3, 1))
assert_size_stride(primals_183, (384,), (1,))
assert_size_stride(primals_184, (384, 384, 1, 3), (1152, 3, 3, 1))
assert_size_stride(primals_185, (384,), (1,))
assert_size_stride(primals_186, (384, 384, 3, 1), (1152, 3, 1, 1))
assert_size_stride(primals_187, (384,), (1,))
assert_size_stride(primals_188, (192, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_189, (192,), (1,))
assert_size_stride(primals_190, (1000, 2048), (2048, 1))
assert_size_stride(primals_191, (1000,), (1,))
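# NOTE: everything below runs under the device-0 guard. buf0..buf53 hold
# channels-last copies of the convolution weights (each original primal is
# deleted once its copy exists), buf54 appears to be the preprocessed,
# channels-last input, and the later buffers hold intermediate activations.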
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((32, 3, 3, 3), (27, 1, 9, 3), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(96, 9)](primals_2, buf0, 96, 9, XBLOCK=16,
YBLOCK=64, num_warps=4, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((32, 32, 3, 3), (288, 1, 96, 32), torch.
float32)
triton_poi_fused_1[grid(1024, 9)](primals_4, buf1, 1024, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf2 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch.
float32)
triton_poi_fused_2[grid(2048, 9)](primals_6, buf2, 2048, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf3 = empty_strided_cuda((192, 80, 3, 3), (720, 1, 240, 80), torch
.float32)
triton_poi_fused_3[grid(15360, 9)](primals_10, buf3, 15360, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_10
buf4 = empty_strided_cuda((64, 48, 5, 5), (1200, 1, 240, 48), torch
.float32)
triton_poi_fused_4[grid(3072, 25)](primals_16, buf4, 3072, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_16
buf5 = empty_strided_cuda((96, 64, 3, 3), (576, 1, 192, 64), torch.
float32)
triton_poi_fused_5[grid(6144, 9)](primals_20, buf5, 6144, 9, XBLOCK
=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_20
buf6 = empty_strided_cuda((96, 96, 3, 3), (864, 1, 288, 96), torch.
float32)
triton_poi_fused_6[grid(9216, 9)](primals_22, buf6, 9216, 9, XBLOCK
=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_22
buf7 = empty_strided_cuda((64, 48, 5, 5), (1200, 1, 240, 48), torch
.float32)
triton_poi_fused_4[grid(3072, 25)](primals_30, buf7, 3072, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_30
buf8 = empty_strided_cuda((96, 64, 3, 3), (576, 1, 192, 64), torch.
float32)
triton_poi_fused_5[grid(6144, 9)](primals_34, buf8, 6144, 9, XBLOCK
=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_34
buf9 = empty_strided_cuda((96, 96, 3, 3), (864, 1, 288, 96), torch.
float32)
triton_poi_fused_6[grid(9216, 9)](primals_36, buf9, 9216, 9, XBLOCK
=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_36
buf10 = empty_strided_cuda((64, 48, 5, 5), (1200, 1, 240, 48),
torch.float32)
triton_poi_fused_4[grid(3072, 25)](primals_44, buf10, 3072, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_44
buf11 = empty_strided_cuda((96, 64, 3, 3), (576, 1, 192, 64), torch
.float32)
triton_poi_fused_5[grid(6144, 9)](primals_48, buf11, 6144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_48
buf12 = empty_strided_cuda((96, 96, 3, 3), (864, 1, 288, 96), torch
.float32)
triton_poi_fused_6[grid(9216, 9)](primals_50, buf12, 9216, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_50
buf13 = empty_strided_cuda((384, 288, 3, 3), (2592, 1, 864, 288),
torch.float32)
triton_poi_fused_7[grid(110592, 9)](primals_54, buf13, 110592, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_54
buf14 = empty_strided_cuda((96, 64, 3, 3), (576, 1, 192, 64), torch
.float32)
triton_poi_fused_5[grid(6144, 9)](primals_58, buf14, 6144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_58
buf15 = empty_strided_cuda((96, 96, 3, 3), (864, 1, 288, 96), torch
.float32)
triton_poi_fused_6[grid(9216, 9)](primals_60, buf15, 9216, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_60
buf16 = empty_strided_cuda((128, 128, 1, 7), (896, 1, 896, 128),
torch.float32)
triton_poi_fused_8[grid(16384, 7)](primals_66, buf16, 16384, 7,
XBLOCK=8, YBLOCK=128, num_warps=4, num_stages=1)
del primals_66
buf17 = empty_strided_cuda((192, 128, 7, 1), (896, 1, 128, 128),
torch.float32)
triton_poi_fused_9[grid(24576, 7)](primals_68, buf17, 24576, 7,
XBLOCK=8, YBLOCK=128, num_warps=4, num_stages=1)
del primals_68
buf18 = empty_strided_cuda((128, 128, 7, 1), (896, 1, 128, 128),
torch.float32)
triton_poi_fused_8[grid(16384, 7)](primals_72, buf18, 16384, 7,
XBLOCK=8, YBLOCK=128, num_warps=4, num_stages=1)
del primals_72
buf19 = empty_strided_cuda((128, 128, 1, 7), (896, 1, 896, 128),
torch.float32)
triton_poi_fused_8[grid(16384, 7)](primals_74, buf19, 16384, 7,
XBLOCK=8, YBLOCK=128, num_warps=4, num_stages=1)
del primals_74
buf20 = empty_strided_cuda((128, 128, 7, 1), (896, 1, 128, 128),
torch.float32)
triton_poi_fused_8[grid(16384, 7)](primals_76, buf20, 16384, 7,
XBLOCK=8, YBLOCK=128, num_warps=4, num_stages=1)
del primals_76
buf21 = empty_strided_cuda((192, 128, 1, 7), (896, 1, 896, 128),
torch.float32)
triton_poi_fused_9[grid(24576, 7)](primals_78, buf21, 24576, 7,
XBLOCK=8, YBLOCK=128, num_warps=4, num_stages=1)
del primals_78
buf22 = empty_strided_cuda((160, 160, 1, 7), (1120, 1, 1120, 160),
torch.float32)
triton_poi_fused_10[grid(25600, 7)](primals_86, buf22, 25600, 7,
XBLOCK=8, YBLOCK=128, num_warps=4, num_stages=1)
del primals_86
buf23 = empty_strided_cuda((192, 160, 7, 1), (1120, 1, 160, 160),
torch.float32)
triton_poi_fused_11[grid(30720, 7)](primals_88, buf23, 30720, 7,
XBLOCK=8, YBLOCK=128, num_warps=4, num_stages=1)
del primals_88
buf24 = empty_strided_cuda((160, 160, 7, 1), (1120, 1, 160, 160),
torch.float32)
triton_poi_fused_10[grid(25600, 7)](primals_92, buf24, 25600, 7,
XBLOCK=8, YBLOCK=128, num_warps=4, num_stages=1)
del primals_92
buf25 = empty_strided_cuda((160, 160, 1, 7), (1120, 1, 1120, 160),
torch.float32)
triton_poi_fused_10[grid(25600, 7)](primals_94, buf25, 25600, 7,
XBLOCK=8, YBLOCK=128, num_warps=4, num_stages=1)
del primals_94
buf26 = empty_strided_cuda((160, 160, 7, 1), (1120, 1, 160, 160),
torch.float32)
triton_poi_fused_10[grid(25600, 7)](primals_96, buf26, 25600, 7,
XBLOCK=8, YBLOCK=128, num_warps=4, num_stages=1)
del primals_96
buf27 = empty_strided_cuda((192, 160, 1, 7), (1120, 1, 1120, 160),
torch.float32)
triton_poi_fused_11[grid(30720, 7)](primals_98, buf27, 30720, 7,
XBLOCK=8, YBLOCK=128, num_warps=4, num_stages=1)
del primals_98
buf28 = empty_strided_cuda((160, 160, 1, 7), (1120, 1, 1120, 160),
torch.float32)
triton_poi_fused_10[grid(25600, 7)](primals_106, buf28, 25600, 7,
XBLOCK=8, YBLOCK=128, num_warps=4, num_stages=1)
del primals_106
buf29 = empty_strided_cuda((192, 160, 7, 1), (1120, 1, 160, 160),
torch.float32)
triton_poi_fused_11[grid(30720, 7)](primals_108, buf29, 30720, 7,
XBLOCK=8, YBLOCK=128, num_warps=4, num_stages=1)
del primals_108
buf30 = empty_strided_cuda((160, 160, 7, 1), (1120, 1, 160, 160),
torch.float32)
triton_poi_fused_10[grid(25600, 7)](primals_112, buf30, 25600, 7,
XBLOCK=8, YBLOCK=128, num_warps=4, num_stages=1)
del primals_112
buf31 = empty_strided_cuda((160, 160, 1, 7), (1120, 1, 1120, 160),
torch.float32)
triton_poi_fused_10[grid(25600, 7)](primals_114, buf31, 25600, 7,
XBLOCK=8, YBLOCK=128, num_warps=4, num_stages=1)
del primals_114
buf32 = empty_strided_cuda((160, 160, 7, 1), (1120, 1, 160, 160),
torch.float32)
triton_poi_fused_10[grid(25600, 7)](primals_116, buf32, 25600, 7,
XBLOCK=8, YBLOCK=128, num_warps=4, num_stages=1)
del primals_116
buf33 = empty_strided_cuda((192, 160, 1, 7), (1120, 1, 1120, 160),
torch.float32)
triton_poi_fused_11[grid(30720, 7)](primals_118, buf33, 30720, 7,
XBLOCK=8, YBLOCK=128, num_warps=4, num_stages=1)
del primals_118
buf34 = empty_strided_cuda((192, 192, 1, 7), (1344, 1, 1344, 192),
torch.float32)
triton_poi_fused_12[grid(36864, 7)](primals_126, buf34, 36864, 7,
XBLOCK=8, YBLOCK=128, num_warps=4, num_stages=1)
del primals_126
buf35 = empty_strided_cuda((192, 192, 7, 1), (1344, 1, 192, 192),
torch.float32)
triton_poi_fused_12[grid(36864, 7)](primals_128, buf35, 36864, 7,
XBLOCK=8, YBLOCK=128, num_warps=4, num_stages=1)
del primals_128
buf36 = empty_strided_cuda((192, 192, 7, 1), (1344, 1, 192, 192),
torch.float32)
triton_poi_fused_12[grid(36864, 7)](primals_132, buf36, 36864, 7,
XBLOCK=8, YBLOCK=128, num_warps=4, num_stages=1)
del primals_132
buf37 = empty_strided_cuda((192, 192, 1, 7), (1344, 1, 1344, 192),
torch.float32)
triton_poi_fused_12[grid(36864, 7)](primals_134, buf37, 36864, 7,
XBLOCK=8, YBLOCK=128, num_warps=4, num_stages=1)
del primals_134
buf38 = empty_strided_cuda((192, 192, 7, 1), (1344, 1, 192, 192),
torch.float32)
triton_poi_fused_12[grid(36864, 7)](primals_136, buf38, 36864, 7,
XBLOCK=8, YBLOCK=128, num_warps=4, num_stages=1)
del primals_136
buf39 = empty_strided_cuda((192, 192, 1, 7), (1344, 1, 1344, 192),
torch.float32)
triton_poi_fused_12[grid(36864, 7)](primals_138, buf39, 36864, 7,
XBLOCK=8, YBLOCK=128, num_warps=4, num_stages=1)
del primals_138
buf40 = empty_strided_cuda((320, 192, 3, 3), (1728, 1, 576, 192),
torch.float32)
triton_poi_fused_13[grid(61440, 9)](primals_144, buf40, 61440, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_144
buf41 = empty_strided_cuda((192, 192, 1, 7), (1344, 1, 1344, 192),
torch.float32)
triton_poi_fused_12[grid(36864, 7)](primals_148, buf41, 36864, 7,
XBLOCK=8, YBLOCK=128, num_warps=4, num_stages=1)
del primals_148
buf42 = empty_strided_cuda((192, 192, 7, 1), (1344, 1, 192, 192),
torch.float32)
triton_poi_fused_12[grid(36864, 7)](primals_150, buf42, 36864, 7,
XBLOCK=8, YBLOCK=128, num_warps=4, num_stages=1)
del primals_150
buf43 = empty_strided_cuda((192, 192, 3, 3), (1728, 1, 576, 192),
torch.float32)
triton_poi_fused_14[grid(36864, 9)](primals_152, buf43, 36864, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_152
buf44 = empty_strided_cuda((384, 384, 1, 3), (1152, 1, 1152, 384),
torch.float32)
triton_poi_fused_15[grid(147456, 3)](primals_158, buf44, 147456, 3,
XBLOCK=4, YBLOCK=256, num_warps=4, num_stages=1)
del primals_158
buf45 = empty_strided_cuda((384, 384, 3, 1), (1152, 1, 384, 384),
torch.float32)
triton_poi_fused_15[grid(147456, 3)](primals_160, buf45, 147456, 3,
XBLOCK=4, YBLOCK=256, num_warps=4, num_stages=1)
del primals_160
buf46 = empty_strided_cuda((384, 448, 3, 3), (4032, 1, 1344, 448),
torch.float32)
triton_poi_fused_16[grid(172032, 9)](primals_164, buf46, 172032, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_164
buf47 = empty_strided_cuda((384, 384, 1, 3), (1152, 1, 1152, 384),
torch.float32)
triton_poi_fused_15[grid(147456, 3)](primals_166, buf47, 147456, 3,
XBLOCK=4, YBLOCK=256, num_warps=4, num_stages=1)
del primals_166
buf48 = empty_strided_cuda((384, 384, 3, 1), (1152, 1, 384, 384),
torch.float32)
triton_poi_fused_15[grid(147456, 3)](primals_168, buf48, 147456, 3,
XBLOCK=4, YBLOCK=256, num_warps=4, num_stages=1)
del primals_168
buf49 = empty_strided_cuda((384, 384, 1, 3), (1152, 1, 1152, 384),
torch.float32)
triton_poi_fused_15[grid(147456, 3)](primals_176, buf49, 147456, 3,
XBLOCK=4, YBLOCK=256, num_warps=4, num_stages=1)
del primals_176
buf50 = empty_strided_cuda((384, 384, 3, 1), (1152, 1, 384, 384),
torch.float32)
triton_poi_fused_15[grid(147456, 3)](primals_178, buf50, 147456, 3,
XBLOCK=4, YBLOCK=256, num_warps=4, num_stages=1)
del primals_178
buf51 = empty_strided_cuda((384, 448, 3, 3), (4032, 1, 1344, 448),
torch.float32)
triton_poi_fused_16[grid(172032, 9)](primals_182, buf51, 172032, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_182
buf52 = empty_strided_cuda((384, 384, 1, 3), (1152, 1, 1152, 384),
torch.float32)
triton_poi_fused_15[grid(147456, 3)](primals_184, buf52, 147456, 3,
XBLOCK=4, YBLOCK=256, num_warps=4, num_stages=1)
del primals_184
buf53 = empty_strided_cuda((384, 384, 3, 1), (1152, 1, 384, 384),
torch.float32)
triton_poi_fused_15[grid(147456, 3)](primals_186, buf53, 147456, 3,
XBLOCK=4, YBLOCK=256, num_warps=4, num_stages=1)
del primals_186
buf54 = empty_strided_cuda((4, 3, 512, 512), (786432, 1, 1536, 3),
torch.float32)
triton_poi_fused_add_copy_mul_17[grid(12, 262144)](primals_1, buf54,
12, 262144, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_1
buf55 = extern_kernels.convolution(buf54, buf0, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf55, (4, 32, 255, 255), (2080800, 1, 8160, 32))
buf56 = buf55
del buf55
triton_poi_fused_convolution_relu_18[grid(8323200)](buf56,
primals_3, 8323200, XBLOCK=512, num_warps=8, num_stages=1)
del primals_3
buf57 = extern_kernels.convolution(buf56, buf1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf57, (4, 32, 253, 253), (2048288, 1, 8096, 32))
buf58 = buf57
del buf57
triton_poi_fused_convolution_relu_19[grid(8193152)](buf58,
primals_5, 8193152, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf59 = extern_kernels.convolution(buf58, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf59, (4, 64, 253, 253), (4096576, 1, 16192, 64))
buf60 = buf59
del buf59
triton_poi_fused_convolution_relu_20[grid(16386304)](buf60,
primals_7, 16386304, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf61 = empty_strided_cuda((4, 64, 126, 126), (1016064, 1, 8064, 64
), torch.float32)
buf62 = empty_strided_cuda((4, 64, 126, 126), (1016064, 1, 8064, 64
), torch.int8)
triton_poi_fused_max_pool2d_with_indices_21[grid(4064256)](buf60,
buf61, buf62, 4064256, XBLOCK=512, num_warps=8, num_stages=1)
buf63 = extern_kernels.convolution(buf61, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf63, (4, 80, 126, 126), (1270080, 1, 10080, 80))
buf64 = buf63
del buf63
triton_poi_fused_convolution_relu_22[grid(5080320)](buf64,
primals_9, 5080320, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
buf65 = extern_kernels.convolution(buf64, buf3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf65, (4, 192, 124, 124), (2952192, 1, 23808, 192))
buf66 = buf65
del buf65
triton_poi_fused_convolution_relu_23[grid(11808768)](buf66,
primals_11, 11808768, XBLOCK=512, num_warps=8, num_stages=1)
del primals_11
buf67 = empty_strided_cuda((4, 192, 61, 61), (714432, 1, 11712, 192
), torch.float32)
buf68 = empty_strided_cuda((4, 192, 61, 61), (714432, 1, 11712, 192
), torch.int8)
triton_poi_fused_max_pool2d_with_indices_24[grid(2857728)](buf66,
buf67, buf68, 2857728, XBLOCK=512, num_warps=8, num_stages=1)
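    # End of the stem: buf67/buf68 are the pooled activations and pooling
    # indices at (4, 192, 61, 61). Three InceptionA blocks (Mixed_5b, Mixed_5c,
    # Mixed_5d in the module definitions below) come next.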
buf69 = extern_kernels.convolution(buf67, primals_12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf69, (4, 64, 61, 61), (238144, 1, 3904, 64))
buf70 = extern_kernels.convolution(buf67, primals_14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf70, (4, 48, 61, 61), (178608, 1, 2928, 48))
buf71 = buf70
del buf70
triton_poi_fused_convolution_relu_25[grid(714432)](buf71,
primals_15, 714432, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_15
buf72 = extern_kernels.convolution(buf71, buf4, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf72, (4, 64, 61, 61), (238144, 1, 3904, 64))
buf73 = extern_kernels.convolution(buf67, primals_18, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf73, (4, 64, 61, 61), (238144, 1, 3904, 64))
buf74 = buf73
del buf73
triton_poi_fused_convolution_relu_26[grid(952576)](buf74,
primals_19, 952576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_19
buf75 = extern_kernels.convolution(buf74, buf5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf75, (4, 96, 61, 61), (357216, 1, 5856, 96))
buf76 = buf75
del buf75
triton_poi_fused_convolution_relu_27[grid(1428864)](buf76,
primals_21, 1428864, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_21
buf77 = extern_kernels.convolution(buf76, buf6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf77, (4, 96, 61, 61), (357216, 1, 5856, 96))
buf78 = empty_strided_cuda((4, 192, 61, 61), (714432, 1, 11712, 192
), torch.float32)
triton_poi_fused_avg_pool2d_28[grid(2857728)](buf67, buf78, 2857728,
XBLOCK=512, num_warps=8, num_stages=1)
buf79 = extern_kernels.convolution(buf78, primals_24, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf79, (4, 32, 61, 61), (119072, 1, 1952, 32))
buf80 = empty_strided_cuda((4, 256, 61, 61), (952576, 1, 15616, 256
), torch.float32)
triton_poi_fused_cat_29[grid(3810304)](buf69, primals_13, buf72,
primals_17, buf77, primals_23, buf79, primals_25, buf80,
3810304, XBLOCK=512, num_warps=8, num_stages=1)
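    # Mixed_5b output: buf80 concatenates the 64+64+96+32 branch channels into
    # (4, 256, 61, 61). The same InceptionA pattern repeats for Mixed_5c.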
buf81 = extern_kernels.convolution(buf80, primals_26, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf81, (4, 64, 61, 61), (238144, 1, 3904, 64))
buf82 = extern_kernels.convolution(buf80, primals_28, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf82, (4, 48, 61, 61), (178608, 1, 2928, 48))
buf83 = buf82
del buf82
triton_poi_fused_convolution_relu_25[grid(714432)](buf83,
primals_29, 714432, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_29
buf84 = extern_kernels.convolution(buf83, buf7, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf84, (4, 64, 61, 61), (238144, 1, 3904, 64))
buf85 = extern_kernels.convolution(buf80, primals_32, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf85, (4, 64, 61, 61), (238144, 1, 3904, 64))
buf86 = buf85
del buf85
triton_poi_fused_convolution_relu_26[grid(952576)](buf86,
primals_33, 952576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_33
buf87 = extern_kernels.convolution(buf86, buf8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf87, (4, 96, 61, 61), (357216, 1, 5856, 96))
buf88 = buf87
del buf87
triton_poi_fused_convolution_relu_27[grid(1428864)](buf88,
primals_35, 1428864, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_35
buf89 = extern_kernels.convolution(buf88, buf9, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf89, (4, 96, 61, 61), (357216, 1, 5856, 96))
buf90 = empty_strided_cuda((4, 256, 61, 61), (952576, 1, 15616, 256
), torch.float32)
triton_poi_fused_avg_pool2d_30[grid(3810304)](buf80, buf90, 3810304,
XBLOCK=1024, num_warps=4, num_stages=1)
buf91 = extern_kernels.convolution(buf90, primals_38, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf91, (4, 64, 61, 61), (238144, 1, 3904, 64))
buf92 = empty_strided_cuda((4, 288, 61, 61), (1071648, 1, 17568,
288), torch.float32)
triton_poi_fused_cat_31[grid(4286592)](buf81, primals_27, buf84,
primals_31, buf89, primals_37, buf91, primals_39, buf92,
4286592, XBLOCK=512, num_warps=8, num_stages=1)
buf93 = extern_kernels.convolution(buf92, primals_40, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf93, (4, 64, 61, 61), (238144, 1, 3904, 64))
buf94 = extern_kernels.convolution(buf92, primals_42, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf94, (4, 48, 61, 61), (178608, 1, 2928, 48))
buf95 = buf94
del buf94
triton_poi_fused_convolution_relu_25[grid(714432)](buf95,
primals_43, 714432, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_43
buf96 = extern_kernels.convolution(buf95, buf10, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf96, (4, 64, 61, 61), (238144, 1, 3904, 64))
buf97 = extern_kernels.convolution(buf92, primals_46, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf97, (4, 64, 61, 61), (238144, 1, 3904, 64))
buf98 = buf97
del buf97
triton_poi_fused_convolution_relu_26[grid(952576)](buf98,
primals_47, 952576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_47
buf99 = extern_kernels.convolution(buf98, buf11, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf99, (4, 96, 61, 61), (357216, 1, 5856, 96))
buf100 = buf99
del buf99
triton_poi_fused_convolution_relu_27[grid(1428864)](buf100,
primals_49, 1428864, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_49
buf101 = extern_kernels.convolution(buf100, buf12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf101, (4, 96, 61, 61), (357216, 1, 5856, 96))
buf102 = empty_strided_cuda((4, 288, 61, 61), (1071648, 1, 17568,
288), torch.float32)
triton_poi_fused_avg_pool2d_32[grid(4286592)](buf92, buf102,
4286592, XBLOCK=512, num_warps=8, num_stages=1)
buf103 = extern_kernels.convolution(buf102, primals_52, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf103, (4, 64, 61, 61), (238144, 1, 3904, 64))
buf104 = empty_strided_cuda((4, 288, 61, 61), (1071648, 1, 17568,
288), torch.float32)
triton_poi_fused_cat_31[grid(4286592)](buf93, primals_41, buf96,
primals_45, buf101, primals_51, buf103, primals_53, buf104,
4286592, XBLOCK=512, num_warps=8, num_stages=1)
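    # Mixed_5d output: buf104 is (4, 288, 61, 61). Next is the grid-reduction
    # block Mixed_6a (InceptionB), which halves the spatial size to 30x30.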
buf105 = extern_kernels.convolution(buf104, buf13, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf105, (4, 384, 30, 30), (345600, 1, 11520, 384))
buf106 = extern_kernels.convolution(buf104, primals_56, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf106, (4, 64, 61, 61), (238144, 1, 3904, 64))
buf107 = buf106
del buf106
triton_poi_fused_convolution_relu_26[grid(952576)](buf107,
primals_57, 952576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_57
buf108 = extern_kernels.convolution(buf107, buf14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf108, (4, 96, 61, 61), (357216, 1, 5856, 96))
buf109 = buf108
del buf108
triton_poi_fused_convolution_relu_27[grid(1428864)](buf109,
primals_59, 1428864, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_59
buf110 = extern_kernels.convolution(buf109, buf15, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf110, (4, 96, 30, 30), (86400, 1, 2880, 96))
buf115 = empty_strided_cuda((4, 768, 30, 30), (691200, 900, 30, 1),
torch.float32)
buf111 = reinterpret_tensor(buf115, (4, 288, 30, 30), (691200, 900,
30, 1), 432000)
buf112 = empty_strided_cuda((4, 288, 30, 30), (259200, 1, 8640, 288
), torch.int8)
triton_poi_fused_max_pool2d_with_indices_33[grid(3600, 288)](buf104,
buf111, buf112, 3600, 288, XBLOCK=32, YBLOCK=32, num_warps=4,
num_stages=1)
buf113 = reinterpret_tensor(buf115, (4, 384, 30, 30), (691200, 900,
30, 1), 0)
buf267 = empty_strided_cuda((4, 384, 30, 30), (345600, 1, 11520,
384), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_34[grid(1536, 900)
](buf105, primals_55, buf113, buf267, 1536, 900, XBLOCK=32,
YBLOCK=32, num_warps=4, num_stages=1)
del buf105
del primals_55
buf114 = reinterpret_tensor(buf115, (4, 96, 30, 30), (691200, 900,
30, 1), 345600)
buf266 = empty_strided_cuda((4, 96, 30, 30), (86400, 1, 2880, 96),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_35[grid(384, 900)
](buf110, primals_61, buf114, buf266, 384, 900, XBLOCK=32,
YBLOCK=32, num_warps=4, num_stages=1)
del buf110
del primals_61
buf116 = empty_strided_cuda((4, 768, 30, 30), (691200, 1, 23040,
768), torch.float32)
triton_poi_fused_cat_36[grid(3072, 900)](buf115, buf116, 3072, 900,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del buf111
del buf113
del buf114
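    # Mixed_6a output assembled into buf116 at (4, 768, 30, 30); buf266/buf267
    # are boolean ReLU masks kept for the backward pass. The four InceptionC
    # blocks (Mixed_6b .. Mixed_6e) follow.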
buf117 = extern_kernels.convolution(buf116, primals_62, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf117, (4, 192, 30, 30), (172800, 1, 5760, 192))
buf118 = extern_kernels.convolution(buf116, primals_64, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf118, (4, 128, 30, 30), (115200, 1, 3840, 128))
buf119 = buf118
del buf118
triton_poi_fused_convolution_relu_37[grid(460800)](buf119,
primals_65, 460800, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_65
buf120 = extern_kernels.convolution(buf119, buf16, stride=(1, 1),
padding=(0, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf120, (4, 128, 30, 30), (115200, 1, 3840, 128))
buf121 = buf120
del buf120
triton_poi_fused_convolution_relu_37[grid(460800)](buf121,
primals_67, 460800, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_67
buf122 = extern_kernels.convolution(buf121, buf17, stride=(1, 1),
padding=(3, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf122, (4, 192, 30, 30), (172800, 1, 5760, 192))
buf123 = extern_kernels.convolution(buf116, primals_70, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf123, (4, 128, 30, 30), (115200, 1, 3840, 128))
buf124 = buf123
del buf123
triton_poi_fused_convolution_relu_37[grid(460800)](buf124,
primals_71, 460800, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_71
buf125 = extern_kernels.convolution(buf124, buf18, stride=(1, 1),
padding=(3, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf125, (4, 128, 30, 30), (115200, 1, 3840, 128))
buf126 = buf125
del buf125
triton_poi_fused_convolution_relu_37[grid(460800)](buf126,
primals_73, 460800, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_73
buf127 = extern_kernels.convolution(buf126, buf19, stride=(1, 1),
padding=(0, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf127, (4, 128, 30, 30), (115200, 1, 3840, 128))
buf128 = buf127
del buf127
triton_poi_fused_convolution_relu_37[grid(460800)](buf128,
primals_75, 460800, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_75
buf129 = extern_kernels.convolution(buf128, buf20, stride=(1, 1),
padding=(3, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf129, (4, 128, 30, 30), (115200, 1, 3840, 128))
buf130 = buf129
del buf129
triton_poi_fused_convolution_relu_37[grid(460800)](buf130,
primals_77, 460800, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_77
buf131 = extern_kernels.convolution(buf130, buf21, stride=(1, 1),
padding=(0, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf131, (4, 192, 30, 30), (172800, 1, 5760, 192))
buf132 = reinterpret_tensor(buf115, (4, 768, 30, 30), (691200, 1,
23040, 768), 0)
del buf115
triton_poi_fused_avg_pool2d_38[grid(2764800)](buf116, buf132,
2764800, XBLOCK=512, num_warps=8, num_stages=1)
buf133 = extern_kernels.convolution(buf132, primals_80, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf133, (4, 192, 30, 30), (172800, 1, 5760, 192))
buf134 = empty_strided_cuda((4, 768, 30, 30), (691200, 1, 23040,
768), torch.float32)
triton_poi_fused_cat_39[grid(2764800)](buf117, primals_63, buf122,
primals_69, buf131, primals_79, buf133, primals_81, buf134,
2764800, XBLOCK=512, num_warps=8, num_stages=1)
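    # Mixed_6b (InceptionC with 128-channel factorized-7x7 branches) output:
    # buf134, (4, 768, 30, 30). Mixed_6c repeats the pattern with 160 channels.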
buf135 = extern_kernels.convolution(buf134, primals_82, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf135, (4, 192, 30, 30), (172800, 1, 5760, 192))
buf136 = extern_kernels.convolution(buf134, primals_84, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf136, (4, 160, 30, 30), (144000, 1, 4800, 160))
buf137 = buf136
del buf136
triton_poi_fused_convolution_relu_40[grid(576000)](buf137,
primals_85, 576000, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_85
buf138 = extern_kernels.convolution(buf137, buf22, stride=(1, 1),
padding=(0, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf138, (4, 160, 30, 30), (144000, 1, 4800, 160))
buf139 = buf138
del buf138
triton_poi_fused_convolution_relu_40[grid(576000)](buf139,
primals_87, 576000, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_87
buf140 = extern_kernels.convolution(buf139, buf23, stride=(1, 1),
padding=(3, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf140, (4, 192, 30, 30), (172800, 1, 5760, 192))
buf141 = extern_kernels.convolution(buf134, primals_90, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf141, (4, 160, 30, 30), (144000, 1, 4800, 160))
buf142 = buf141
del buf141
triton_poi_fused_convolution_relu_40[grid(576000)](buf142,
primals_91, 576000, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_91
buf143 = extern_kernels.convolution(buf142, buf24, stride=(1, 1),
padding=(3, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf143, (4, 160, 30, 30), (144000, 1, 4800, 160))
buf144 = buf143
del buf143
triton_poi_fused_convolution_relu_40[grid(576000)](buf144,
primals_93, 576000, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_93
buf145 = extern_kernels.convolution(buf144, buf25, stride=(1, 1),
padding=(0, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf145, (4, 160, 30, 30), (144000, 1, 4800, 160))
buf146 = buf145
del buf145
triton_poi_fused_convolution_relu_40[grid(576000)](buf146,
primals_95, 576000, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_95
buf147 = extern_kernels.convolution(buf146, buf26, stride=(1, 1),
padding=(3, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf147, (4, 160, 30, 30), (144000, 1, 4800, 160))
buf148 = buf147
del buf147
triton_poi_fused_convolution_relu_40[grid(576000)](buf148,
primals_97, 576000, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_97
buf149 = extern_kernels.convolution(buf148, buf27, stride=(1, 1),
padding=(0, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf149, (4, 192, 30, 30), (172800, 1, 5760, 192))
buf150 = empty_strided_cuda((4, 768, 30, 30), (691200, 1, 23040,
768), torch.float32)
triton_poi_fused_avg_pool2d_38[grid(2764800)](buf134, buf150,
2764800, XBLOCK=512, num_warps=8, num_stages=1)
buf151 = extern_kernels.convolution(buf150, primals_100, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf151, (4, 192, 30, 30), (172800, 1, 5760, 192))
buf152 = empty_strided_cuda((4, 768, 30, 30), (691200, 1, 23040,
768), torch.float32)
triton_poi_fused_cat_39[grid(2764800)](buf135, primals_83, buf140,
primals_89, buf149, primals_99, buf151, primals_101, buf152,
2764800, XBLOCK=512, num_warps=8, num_stages=1)
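    # Mixed_6c output: buf152, (4, 768, 30, 30). Mixed_6d follows with the
    # same 160-channel factorized-7x7 branches.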
buf153 = extern_kernels.convolution(buf152, primals_102, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf153, (4, 192, 30, 30), (172800, 1, 5760, 192))
buf154 = extern_kernels.convolution(buf152, primals_104, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf154, (4, 160, 30, 30), (144000, 1, 4800, 160))
buf155 = buf154
del buf154
triton_poi_fused_convolution_relu_40[grid(576000)](buf155,
primals_105, 576000, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_105
buf156 = extern_kernels.convolution(buf155, buf28, stride=(1, 1),
padding=(0, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf156, (4, 160, 30, 30), (144000, 1, 4800, 160))
buf157 = buf156
del buf156
triton_poi_fused_convolution_relu_40[grid(576000)](buf157,
primals_107, 576000, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_107
buf158 = extern_kernels.convolution(buf157, buf29, stride=(1, 1),
padding=(3, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf158, (4, 192, 30, 30), (172800, 1, 5760, 192))
buf159 = extern_kernels.convolution(buf152, primals_110, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf159, (4, 160, 30, 30), (144000, 1, 4800, 160))
buf160 = buf159
del buf159
triton_poi_fused_convolution_relu_40[grid(576000)](buf160,
primals_111, 576000, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_111
buf161 = extern_kernels.convolution(buf160, buf30, stride=(1, 1),
padding=(3, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf161, (4, 160, 30, 30), (144000, 1, 4800, 160))
buf162 = buf161
del buf161
triton_poi_fused_convolution_relu_40[grid(576000)](buf162,
primals_113, 576000, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_113
buf163 = extern_kernels.convolution(buf162, buf31, stride=(1, 1),
padding=(0, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf163, (4, 160, 30, 30), (144000, 1, 4800, 160))
buf164 = buf163
del buf163
triton_poi_fused_convolution_relu_40[grid(576000)](buf164,
primals_115, 576000, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_115
buf165 = extern_kernels.convolution(buf164, buf32, stride=(1, 1),
padding=(3, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf165, (4, 160, 30, 30), (144000, 1, 4800, 160))
buf166 = buf165
del buf165
triton_poi_fused_convolution_relu_40[grid(576000)](buf166,
primals_117, 576000, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_117
buf167 = extern_kernels.convolution(buf166, buf33, stride=(1, 1),
padding=(0, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf167, (4, 192, 30, 30), (172800, 1, 5760, 192))
buf168 = empty_strided_cuda((4, 768, 30, 30), (691200, 1, 23040,
768), torch.float32)
triton_poi_fused_avg_pool2d_38[grid(2764800)](buf152, buf168,
2764800, XBLOCK=512, num_warps=8, num_stages=1)
buf169 = extern_kernels.convolution(buf168, primals_120, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf169, (4, 192, 30, 30), (172800, 1, 5760, 192))
buf170 = empty_strided_cuda((4, 768, 30, 30), (691200, 1, 23040,
768), torch.float32)
triton_poi_fused_cat_39[grid(2764800)](buf153, primals_103, buf158,
primals_109, buf167, primals_119, buf169, primals_121, buf170,
2764800, XBLOCK=512, num_warps=8, num_stages=1)
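    # Mixed_6d output: buf170, (4, 768, 30, 30). Mixed_6e uses 192-channel
    # 7x7 branches.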
buf171 = extern_kernels.convolution(buf170, primals_122, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf171, (4, 192, 30, 30), (172800, 1, 5760, 192))
buf172 = extern_kernels.convolution(buf170, primals_124, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf172, (4, 192, 30, 30), (172800, 1, 5760, 192))
buf173 = buf172
del buf172
triton_poi_fused_convolution_relu_41[grid(691200)](buf173,
primals_125, 691200, XBLOCK=512, num_warps=8, num_stages=1)
del primals_125
buf174 = extern_kernels.convolution(buf173, buf34, stride=(1, 1),
padding=(0, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf174, (4, 192, 30, 30), (172800, 1, 5760, 192))
buf175 = buf174
del buf174
triton_poi_fused_convolution_relu_41[grid(691200)](buf175,
primals_127, 691200, XBLOCK=512, num_warps=8, num_stages=1)
del primals_127
buf176 = extern_kernels.convolution(buf175, buf35, stride=(1, 1),
padding=(3, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf176, (4, 192, 30, 30), (172800, 1, 5760, 192))
buf177 = extern_kernels.convolution(buf170, primals_130, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf177, (4, 192, 30, 30), (172800, 1, 5760, 192))
buf178 = buf177
del buf177
triton_poi_fused_convolution_relu_41[grid(691200)](buf178,
primals_131, 691200, XBLOCK=512, num_warps=8, num_stages=1)
del primals_131
buf179 = extern_kernels.convolution(buf178, buf36, stride=(1, 1),
padding=(3, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf179, (4, 192, 30, 30), (172800, 1, 5760, 192))
buf180 = buf179
del buf179
triton_poi_fused_convolution_relu_41[grid(691200)](buf180,
primals_133, 691200, XBLOCK=512, num_warps=8, num_stages=1)
del primals_133
buf181 = extern_kernels.convolution(buf180, buf37, stride=(1, 1),
padding=(0, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf181, (4, 192, 30, 30), (172800, 1, 5760, 192))
buf182 = buf181
del buf181
triton_poi_fused_convolution_relu_41[grid(691200)](buf182,
primals_135, 691200, XBLOCK=512, num_warps=8, num_stages=1)
del primals_135
buf183 = extern_kernels.convolution(buf182, buf38, stride=(1, 1),
padding=(3, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf183, (4, 192, 30, 30), (172800, 1, 5760, 192))
buf184 = buf183
del buf183
triton_poi_fused_convolution_relu_41[grid(691200)](buf184,
primals_137, 691200, XBLOCK=512, num_warps=8, num_stages=1)
del primals_137
buf185 = extern_kernels.convolution(buf184, buf39, stride=(1, 1),
padding=(0, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf185, (4, 192, 30, 30), (172800, 1, 5760, 192))
buf186 = empty_strided_cuda((4, 768, 30, 30), (691200, 1, 23040,
768), torch.float32)
triton_poi_fused_avg_pool2d_38[grid(2764800)](buf170, buf186,
2764800, XBLOCK=512, num_warps=8, num_stages=1)
buf187 = extern_kernels.convolution(buf186, primals_140, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf187, (4, 192, 30, 30), (172800, 1, 5760, 192))
buf188 = empty_strided_cuda((4, 768, 30, 30), (691200, 1, 23040,
768), torch.float32)
triton_poi_fused_cat_39[grid(2764800)](buf171, primals_123, buf176,
primals_129, buf185, primals_139, buf187, primals_141, buf188,
2764800, XBLOCK=512, num_warps=8, num_stages=1)
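    # Mixed_6e output: buf188, (4, 768, 30, 30). Next is the second grid
    # reduction, Mixed_7a (InceptionD), bringing the feature map down to 14x14.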
buf189 = extern_kernels.convolution(buf188, primals_142, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf189, (4, 192, 30, 30), (172800, 1, 5760, 192))
buf190 = buf189
del buf189
triton_poi_fused_convolution_relu_41[grid(691200)](buf190,
primals_143, 691200, XBLOCK=512, num_warps=8, num_stages=1)
del primals_143
buf191 = extern_kernels.convolution(buf190, buf40, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf191, (4, 320, 14, 14), (62720, 1, 4480, 320))
buf192 = extern_kernels.convolution(buf188, primals_146, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf192, (4, 192, 30, 30), (172800, 1, 5760, 192))
buf193 = buf192
del buf192
triton_poi_fused_convolution_relu_41[grid(691200)](buf193,
primals_147, 691200, XBLOCK=512, num_warps=8, num_stages=1)
del primals_147
buf194 = extern_kernels.convolution(buf193, buf41, stride=(1, 1),
padding=(0, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf194, (4, 192, 30, 30), (172800, 1, 5760, 192))
buf195 = buf194
del buf194
triton_poi_fused_convolution_relu_41[grid(691200)](buf195,
primals_149, 691200, XBLOCK=512, num_warps=8, num_stages=1)
del primals_149
buf196 = extern_kernels.convolution(buf195, buf42, stride=(1, 1),
padding=(3, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf196, (4, 192, 30, 30), (172800, 1, 5760, 192))
buf197 = buf196
del buf196
triton_poi_fused_convolution_relu_41[grid(691200)](buf197,
primals_151, 691200, XBLOCK=512, num_warps=8, num_stages=1)
del primals_151
buf198 = extern_kernels.convolution(buf197, buf43, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf198, (4, 192, 14, 14), (37632, 1, 2688, 192))
buf203 = empty_strided_cuda((4, 1280, 14, 14), (250880, 196, 14, 1),
torch.float32)
buf199 = reinterpret_tensor(buf203, (4, 768, 14, 14), (250880, 196,
14, 1), 100352)
buf200 = empty_strided_cuda((4, 768, 14, 14), (150528, 1, 10752,
768), torch.int8)
triton_poi_fused_max_pool2d_with_indices_42[grid(784, 768)](buf188,
buf199, buf200, 784, 768, XBLOCK=32, YBLOCK=32, num_warps=4,
num_stages=1)
buf201 = reinterpret_tensor(buf203, (4, 320, 14, 14), (250880, 196,
14, 1), 0)
buf249 = empty_strided_cuda((4, 320, 14, 14), (62720, 1, 4480, 320),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_43[grid(1280, 196)
](buf191, primals_145, buf201, buf249, 1280, 196, XBLOCK=32,
YBLOCK=32, num_warps=4, num_stages=1)
del buf191
del primals_145
buf202 = reinterpret_tensor(buf203, (4, 192, 14, 14), (250880, 196,
14, 1), 62720)
buf248 = empty_strided_cuda((4, 192, 14, 14), (37632, 1, 2688, 192),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_44[grid(768, 196)
](buf198, primals_153, buf202, buf248, 768, 196, XBLOCK=64,
YBLOCK=64, num_warps=8, num_stages=1)
del buf198
del primals_153
buf204 = empty_strided_cuda((4, 1280, 14, 14), (250880, 1, 17920,
1280), torch.float32)
triton_poi_fused_cat_45[grid(5120, 196)](buf203, buf204, 5120, 196,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del buf199
del buf201
del buf202
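    # Mixed_7a output assembled into buf204 at (4, 1280, 14, 14), again with
    # ReLU masks (buf248/buf249) saved for backward. The two InceptionE blocks
    # (Mixed_7b, Mixed_7c) follow.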
buf205 = extern_kernels.convolution(buf204, primals_154, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf205, (4, 320, 14, 14), (62720, 1, 4480, 320))
buf206 = extern_kernels.convolution(buf204, primals_156, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf206, (4, 384, 14, 14), (75264, 1, 5376, 384))
buf207 = buf206
del buf206
triton_poi_fused_convolution_relu_46[grid(301056)](buf207,
primals_157, 301056, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_157
buf208 = extern_kernels.convolution(buf207, buf44, stride=(1, 1),
padding=(0, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf208, (4, 384, 14, 14), (75264, 1, 5376, 384))
buf209 = extern_kernels.convolution(buf207, buf45, stride=(1, 1),
padding=(1, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf209, (4, 384, 14, 14), (75264, 1, 5376, 384))
buf210 = extern_kernels.convolution(buf204, primals_162, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf210, (4, 448, 14, 14), (87808, 1, 6272, 448))
buf211 = buf210
del buf210
triton_poi_fused_convolution_relu_47[grid(351232)](buf211,
primals_163, 351232, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_163
buf212 = extern_kernels.convolution(buf211, buf46, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf212, (4, 384, 14, 14), (75264, 1, 5376, 384))
buf213 = buf212
del buf212
triton_poi_fused_convolution_relu_46[grid(301056)](buf213,
primals_165, 301056, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_165
buf214 = extern_kernels.convolution(buf213, buf47, stride=(1, 1),
padding=(0, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf214, (4, 384, 14, 14), (75264, 1, 5376, 384))
buf215 = extern_kernels.convolution(buf213, buf48, stride=(1, 1),
padding=(1, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf215, (4, 384, 14, 14), (75264, 1, 5376, 384))
buf216 = reinterpret_tensor(buf203, (4, 1280, 14, 14), (250880, 1,
17920, 1280), 0)
del buf203
triton_poi_fused_avg_pool2d_48[grid(1003520)](buf204, buf216,
1003520, XBLOCK=512, num_warps=8, num_stages=1)
buf217 = extern_kernels.convolution(buf216, primals_170, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf217, (4, 192, 14, 14), (37632, 1, 2688, 192))
buf218 = empty_strided_cuda((4, 2048, 14, 14), (401408, 1, 28672,
2048), torch.float32)
triton_poi_fused_cat_49[grid(1605632)](buf205, primals_155, buf208,
primals_159, buf209, primals_161, buf214, primals_167, buf215,
primals_169, buf217, primals_171, buf218, 1605632, XBLOCK=512,
num_warps=8, num_stages=1)
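    # Mixed_7b output: buf218, (4, 2048, 14, 14). Mixed_7c repeats the
    # InceptionE pattern on 2048 input channels.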
buf219 = extern_kernels.convolution(buf218, primals_172, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf219, (4, 320, 14, 14), (62720, 1, 4480, 320))
buf220 = extern_kernels.convolution(buf218, primals_174, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf220, (4, 384, 14, 14), (75264, 1, 5376, 384))
buf221 = buf220
del buf220
triton_poi_fused_convolution_relu_46[grid(301056)](buf221,
primals_175, 301056, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_175
buf222 = extern_kernels.convolution(buf221, buf49, stride=(1, 1),
padding=(0, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf222, (4, 384, 14, 14), (75264, 1, 5376, 384))
buf223 = extern_kernels.convolution(buf221, buf50, stride=(1, 1),
padding=(1, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf223, (4, 384, 14, 14), (75264, 1, 5376, 384))
buf224 = extern_kernels.convolution(buf218, primals_180, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf224, (4, 448, 14, 14), (87808, 1, 6272, 448))
buf225 = buf224
del buf224
triton_poi_fused_convolution_relu_47[grid(351232)](buf225,
primals_181, 351232, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_181
buf226 = extern_kernels.convolution(buf225, buf51, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf226, (4, 384, 14, 14), (75264, 1, 5376, 384))
buf227 = buf226
del buf226
triton_poi_fused_convolution_relu_46[grid(301056)](buf227,
primals_183, 301056, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_183
buf228 = extern_kernels.convolution(buf227, buf52, stride=(1, 1),
padding=(0, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf228, (4, 384, 14, 14), (75264, 1, 5376, 384))
buf229 = extern_kernels.convolution(buf227, buf53, stride=(1, 1),
padding=(1, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf229, (4, 384, 14, 14), (75264, 1, 5376, 384))
buf230 = empty_strided_cuda((4, 2048, 14, 14), (401408, 1, 28672,
2048), torch.float32)
triton_poi_fused_avg_pool2d_50[grid(1605632)](buf218, buf230,
1605632, XBLOCK=512, num_warps=8, num_stages=1)
buf231 = extern_kernels.convolution(buf230, primals_188, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf231, (4, 192, 14, 14), (37632, 1, 2688, 192))
buf232 = empty_strided_cuda((4, 2048, 14, 14), (401408, 1, 28672,
2048), torch.float32)
triton_poi_fused_cat_49[grid(1605632)](buf219, primals_173, buf222,
primals_177, buf223, primals_179, buf228, primals_185, buf229,
primals_187, buf231, primals_189, buf232, 1605632, XBLOCK=512,
num_warps=8, num_stages=1)
buf233 = torch.ops.aten.avg_pool2d.default(buf232, [8, 8], [8, 8],
[0, 0], False, True, None)
buf234 = buf233
del buf233
buf235 = empty_strided_cuda((4, 1000), (1000, 1), torch.float32)
extern_kernels.addmm(primals_191, reinterpret_tensor(buf234, (4,
2048), (2048, 1), 0), reinterpret_tensor(primals_190, (2048,
1000), (1, 2048), 0), alpha=1, beta=1, out=buf235)
del primals_191
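    # Classifier head: an 8x8 average pool reduces buf232 to (4, 2048, 1, 1),
    # and the addmm applies the fc layer (primals_190 weight, primals_191 bias)
    # to produce the (4, 1000) logits in buf235. Everything below this point
    # only builds boolean ReLU masks consumed by the generated backward pass.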
buf236 = empty_strided_cuda((4, 192, 14, 14), (37632, 1, 2688, 192),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_51[grid(150528)](
buf231, primals_189, buf236, 150528, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf231
del primals_189
buf237 = empty_strided_cuda((4, 384, 14, 14), (75264, 1, 5376, 384),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_52[grid(301056)](
buf229, primals_187, buf237, 301056, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf229
del primals_187
buf238 = empty_strided_cuda((4, 384, 14, 14), (75264, 1, 5376, 384),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_52[grid(301056)](
buf228, primals_185, buf238, 301056, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf228
del primals_185
buf239 = empty_strided_cuda((4, 384, 14, 14), (75264, 1, 5376, 384),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_52[grid(301056)](
buf223, primals_179, buf239, 301056, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf223
del primals_179
buf240 = empty_strided_cuda((4, 384, 14, 14), (75264, 1, 5376, 384),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_52[grid(301056)](
buf222, primals_177, buf240, 301056, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf222
del primals_177
buf241 = empty_strided_cuda((4, 320, 14, 14), (62720, 1, 4480, 320),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_53[grid(250880)](
buf219, primals_173, buf241, 250880, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf219
del primals_173
buf242 = empty_strided_cuda((4, 192, 14, 14), (37632, 1, 2688, 192),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_51[grid(150528)](
buf217, primals_171, buf242, 150528, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf217
del primals_171
buf243 = empty_strided_cuda((4, 384, 14, 14), (75264, 1, 5376, 384),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_52[grid(301056)](
buf215, primals_169, buf243, 301056, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf215
del primals_169
buf244 = empty_strided_cuda((4, 384, 14, 14), (75264, 1, 5376, 384),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_52[grid(301056)](
buf214, primals_167, buf244, 301056, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf214
del primals_167
buf245 = empty_strided_cuda((4, 384, 14, 14), (75264, 1, 5376, 384),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_52[grid(301056)](
buf209, primals_161, buf245, 301056, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf209
del primals_161
buf246 = empty_strided_cuda((4, 384, 14, 14), (75264, 1, 5376, 384),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_52[grid(301056)](
buf208, primals_159, buf246, 301056, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf208
del primals_159
buf247 = empty_strided_cuda((4, 320, 14, 14), (62720, 1, 4480, 320),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_53[grid(250880)](
buf205, primals_155, buf247, 250880, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf205
del primals_155
buf250 = empty_strided_cuda((4, 192, 30, 30), (172800, 1, 5760, 192
), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_54[grid(691200)](
buf187, primals_141, buf250, 691200, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf187
del primals_141
buf251 = empty_strided_cuda((4, 192, 30, 30), (172800, 1, 5760, 192
), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_54[grid(691200)](
buf185, primals_139, buf251, 691200, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf185
del primals_139
buf252 = empty_strided_cuda((4, 192, 30, 30), (172800, 1, 5760, 192
), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_54[grid(691200)](
buf176, primals_129, buf252, 691200, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf176
del primals_129
buf253 = empty_strided_cuda((4, 192, 30, 30), (172800, 1, 5760, 192
), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_54[grid(691200)](
buf171, primals_123, buf253, 691200, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf171
del primals_123
buf254 = empty_strided_cuda((4, 192, 30, 30), (172800, 1, 5760, 192
), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_54[grid(691200)](
buf169, primals_121, buf254, 691200, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf169
del primals_121
buf255 = empty_strided_cuda((4, 192, 30, 30), (172800, 1, 5760, 192
), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_54[grid(691200)](
buf167, primals_119, buf255, 691200, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf167
del primals_119
buf256 = empty_strided_cuda((4, 192, 30, 30), (172800, 1, 5760, 192
), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_54[grid(691200)](
buf158, primals_109, buf256, 691200, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf158
del primals_109
buf257 = empty_strided_cuda((4, 192, 30, 30), (172800, 1, 5760, 192
), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_54[grid(691200)](
buf153, primals_103, buf257, 691200, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf153
del primals_103
buf258 = empty_strided_cuda((4, 192, 30, 30), (172800, 1, 5760, 192
), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_54[grid(691200)](
buf151, primals_101, buf258, 691200, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf151
del primals_101
buf259 = empty_strided_cuda((4, 192, 30, 30), (172800, 1, 5760, 192
), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_54[grid(691200)](
buf149, primals_99, buf259, 691200, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf149
del primals_99
buf260 = empty_strided_cuda((4, 192, 30, 30), (172800, 1, 5760, 192
), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_54[grid(691200)](
buf140, primals_89, buf260, 691200, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf140
del primals_89
buf261 = empty_strided_cuda((4, 192, 30, 30), (172800, 1, 5760, 192
), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_54[grid(691200)](
buf135, primals_83, buf261, 691200, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf135
del primals_83
buf262 = empty_strided_cuda((4, 192, 30, 30), (172800, 1, 5760, 192
), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_54[grid(691200)](
buf133, primals_81, buf262, 691200, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf133
del primals_81
buf263 = empty_strided_cuda((4, 192, 30, 30), (172800, 1, 5760, 192
), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_54[grid(691200)](
buf131, primals_79, buf263, 691200, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf131
del primals_79
buf264 = empty_strided_cuda((4, 192, 30, 30), (172800, 1, 5760, 192
), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_54[grid(691200)](
buf122, primals_69, buf264, 691200, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf122
del primals_69
buf265 = empty_strided_cuda((4, 192, 30, 30), (172800, 1, 5760, 192
), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_54[grid(691200)](
buf117, primals_63, buf265, 691200, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf117
del primals_63
buf268 = empty_strided_cuda((4, 64, 61, 61), (238144, 1, 3904, 64),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_55[grid(952576)](
buf103, primals_53, buf268, 952576, XBLOCK=512, num_warps=8,
num_stages=1)
del buf103
del primals_53
buf269 = empty_strided_cuda((4, 96, 61, 61), (357216, 1, 5856, 96),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_56[grid(1428864)](
buf101, primals_51, buf269, 1428864, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf101
del primals_51
buf270 = empty_strided_cuda((4, 64, 61, 61), (238144, 1, 3904, 64),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_55[grid(952576)](
buf96, primals_45, buf270, 952576, XBLOCK=512, num_warps=8,
num_stages=1)
del buf96
del primals_45
buf271 = empty_strided_cuda((4, 64, 61, 61), (238144, 1, 3904, 64),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_55[grid(952576)](
buf93, primals_41, buf271, 952576, XBLOCK=512, num_warps=8,
num_stages=1)
del buf93
del primals_41
buf272 = empty_strided_cuda((4, 64, 61, 61), (238144, 1, 3904, 64),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_55[grid(952576)](
buf91, primals_39, buf272, 952576, XBLOCK=512, num_warps=8,
num_stages=1)
del buf91
del primals_39
buf273 = empty_strided_cuda((4, 96, 61, 61), (357216, 1, 5856, 96),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_56[grid(1428864)](
buf89, primals_37, buf273, 1428864, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf89
del primals_37
buf274 = empty_strided_cuda((4, 64, 61, 61), (238144, 1, 3904, 64),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_55[grid(952576)](
buf84, primals_31, buf274, 952576, XBLOCK=512, num_warps=8,
num_stages=1)
del buf84
del primals_31
buf275 = empty_strided_cuda((4, 64, 61, 61), (238144, 1, 3904, 64),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_55[grid(952576)](
buf81, primals_27, buf275, 952576, XBLOCK=512, num_warps=8,
num_stages=1)
del buf81
del primals_27
buf276 = empty_strided_cuda((4, 32, 61, 61), (119072, 1, 1952, 32),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_57[grid(476288)](
buf79, primals_25, buf276, 476288, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf79
del primals_25
buf277 = empty_strided_cuda((4, 96, 61, 61), (357216, 1, 5856, 96),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_56[grid(1428864)](
buf77, primals_23, buf277, 1428864, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf77
del primals_23
buf278 = empty_strided_cuda((4, 64, 61, 61), (238144, 1, 3904, 64),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_55[grid(952576)](
buf72, primals_17, buf278, 952576, XBLOCK=512, num_warps=8,
num_stages=1)
del buf72
del primals_17
buf279 = empty_strided_cuda((4, 64, 61, 61), (238144, 1, 3904, 64),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_55[grid(952576)](
buf69, primals_13, buf279, 952576, XBLOCK=512, num_warps=8,
num_stages=1)
del buf69
del primals_13
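    # The first element of the return tuple is the logits tensor (buf235); the
    # remaining entries are the repacked weights, saved activations, and ReLU
    # masks that the generated backward pass expects.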
return (buf235, buf0, buf1, buf2, primals_8, buf3, primals_12,
primals_14, buf4, primals_18, buf5, buf6, primals_24, primals_26,
primals_28, buf7, primals_32, buf8, buf9, primals_38, primals_40,
primals_42, buf10, primals_46, buf11, buf12, primals_52, buf13,
primals_56, buf14, buf15, primals_62, primals_64, buf16, buf17,
primals_70, buf18, buf19, buf20, buf21, primals_80, primals_82,
primals_84, buf22, buf23, primals_90, buf24, buf25, buf26, buf27,
primals_100, primals_102, primals_104, buf28, buf29, primals_110,
buf30, buf31, buf32, buf33, primals_120, primals_122, primals_124,
buf34, buf35, primals_130, buf36, buf37, buf38, buf39, primals_140,
primals_142, buf40, primals_146, buf41, buf42, buf43, primals_154,
primals_156, buf44, buf45, primals_162, buf46, buf47, buf48,
primals_170, primals_172, primals_174, buf49, buf50, primals_180,
buf51, buf52, buf53, primals_188, buf54, buf56, buf58, buf60, buf61,
buf62, buf64, buf66, buf67, buf68, buf71, buf74, buf76, buf78,
buf80, buf83, buf86, buf88, buf90, buf92, buf95, buf98, buf100,
buf102, buf104, buf107, buf109, buf112, buf116, buf119, buf121,
buf124, buf126, buf128, buf130, buf132, buf134, buf137, buf139,
buf142, buf144, buf146, buf148, buf150, buf152, buf155, buf157,
buf160, buf162, buf164, buf166, buf168, buf170, buf173, buf175,
buf178, buf180, buf182, buf184, buf186, buf188, buf190, buf193,
buf195, buf197, buf200, buf204, buf207, buf211, buf213, buf216,
buf218, buf221, buf225, buf227, buf230, buf232, reinterpret_tensor(
buf234, (4, 2048), (2048, 1), 0), primals_190, buf236, buf237,
buf238, buf239, buf240, buf241, buf242, buf243, buf244, buf245,
buf246, buf247, buf248, buf249, buf250, buf251, buf252, buf253,
buf254, buf255, buf256, buf257, buf258, buf259, buf260, buf261,
buf262, buf263, buf264, buf265, buf266, buf267, buf268, buf269,
buf270, buf271, buf272, buf273, buf274, buf275, buf276, buf277,
buf278, buf279)
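
# Eager-mode reference modules for the compiled graph above. A minimal usage
# sketch (assumed; it presumes Inception3New.forward further below ultimately
# drives call() with an input matching the (4, 3, 512, 512) buffer allocated
# above):
#   model = Inception3New(num_classes=1000, transform_input=True)
#   logits = model(torch.rand(4, 3, 512, 512))  # expected shape: (4, 1000)
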
class BasicConv2d(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, bias=True, **kwargs)
def forward(self, x):
x = self.conv(x)
return F.relu(x, inplace=True)
class InceptionA(nn.Module):
def __init__(self, in_channels, pool_features):
super(InceptionA, self).__init__()
self.branch1x1 = BasicConv2d(in_channels, 64, kernel_size=1)
self.branch5x5_1 = BasicConv2d(in_channels, 48, kernel_size=1)
self.branch5x5_2 = BasicConv2d(48, 64, kernel_size=5, padding=2)
self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, padding=1)
self.branch_pool = BasicConv2d(in_channels, pool_features,
kernel_size=1)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
class InceptionB(nn.Module):
def __init__(self, in_channels):
super(InceptionB, self).__init__()
self.branch3x3 = BasicConv2d(in_channels, 384, kernel_size=3, stride=2)
self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, stride=2)
def forward(self, x):
branch3x3 = self.branch3x3(x)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
outputs = [branch3x3, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
class InceptionC(nn.Module):
def __init__(self, in_channels, channels_7x7):
super(InceptionC, self).__init__()
self.branch1x1 = BasicConv2d(in_channels, 192, kernel_size=1)
c7 = channels_7x7
self.branch7x7_1 = BasicConv2d(in_channels, c7, kernel_size=1)
self.branch7x7_2 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=
(0, 3))
self.branch7x7_3 = BasicConv2d(c7, 192, kernel_size=(7, 1), padding
=(3, 0))
self.branch7x7dbl_1 = BasicConv2d(in_channels, c7, kernel_size=1)
self.branch7x7dbl_2 = BasicConv2d(c7, c7, kernel_size=(7, 1),
padding=(3, 0))
self.branch7x7dbl_3 = BasicConv2d(c7, c7, kernel_size=(1, 7),
padding=(0, 3))
self.branch7x7dbl_4 = BasicConv2d(c7, c7, kernel_size=(7, 1),
padding=(3, 0))
self.branch7x7dbl_5 = BasicConv2d(c7, 192, kernel_size=(1, 7),
padding=(0, 3))
self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch7x7 = self.branch7x7_1(x)
branch7x7 = self.branch7x7_2(branch7x7)
branch7x7 = self.branch7x7_3(branch7x7)
branch7x7dbl = self.branch7x7dbl_1(x)
branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
return torch.cat(outputs, 1)
class InceptionD(nn.Module):
def __init__(self, in_channels):
super(InceptionD, self).__init__()
self.branch3x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
self.branch3x3_2 = BasicConv2d(192, 320, kernel_size=3, stride=2)
self.branch7x7x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
self.branch7x7x3_2 = BasicConv2d(192, 192, kernel_size=(1, 7),
padding=(0, 3))
self.branch7x7x3_3 = BasicConv2d(192, 192, kernel_size=(7, 1),
padding=(3, 0))
self.branch7x7x3_4 = BasicConv2d(192, 192, kernel_size=3, stride=2)
def forward(self, x):
branch3x3 = self.branch3x3_1(x)
branch3x3 = self.branch3x3_2(branch3x3)
branch7x7x3 = self.branch7x7x3_1(x)
branch7x7x3 = self.branch7x7x3_2(branch7x7x3)
branch7x7x3 = self.branch7x7x3_3(branch7x7x3)
branch7x7x3 = self.branch7x7x3_4(branch7x7x3)
branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
outputs = [branch3x3, branch7x7x3, branch_pool]
return torch.cat(outputs, 1)
class InceptionE(nn.Module):
def __init__(self, in_channels):
super(InceptionE, self).__init__()
self.branch1x1 = BasicConv2d(in_channels, 320, kernel_size=1)
self.branch3x3_1 = BasicConv2d(in_channels, 384, kernel_size=1)
self.branch3x3_2a = BasicConv2d(384, 384, kernel_size=(1, 3),
padding=(0, 1))
self.branch3x3_2b = BasicConv2d(384, 384, kernel_size=(3, 1),
padding=(1, 0))
self.branch3x3dbl_1 = BasicConv2d(in_channels, 448, kernel_size=1)
self.branch3x3dbl_2 = BasicConv2d(448, 384, kernel_size=3, padding=1)
self.branch3x3dbl_3a = BasicConv2d(384, 384, kernel_size=(1, 3),
padding=(0, 1))
self.branch3x3dbl_3b = BasicConv2d(384, 384, kernel_size=(3, 1),
padding=(1, 0))
self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch3x3 = self.branch3x3_1(x)
branch3x3 = [self.branch3x3_2a(branch3x3), self.branch3x3_2b(branch3x3)
]
branch3x3 = torch.cat(branch3x3, 1)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = [self.branch3x3dbl_3a(branch3x3dbl), self.
branch3x3dbl_3b(branch3x3dbl)]
branch3x3dbl = torch.cat(branch3x3dbl, 1)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
class Inception3New(nn.Module):
def __init__(self, num_classes=1000, transform_input=True):
super(Inception3New, self).__init__()
self.transform_input = transform_input
self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
self.Mixed_5b = InceptionA(192, pool_features=32)
self.Mixed_5c = InceptionA(256, pool_features=64)
self.Mixed_5d = InceptionA(288, pool_features=64)
self.Mixed_6a = InceptionB(288)
self.Mixed_6b = InceptionC(768, channels_7x7=128)
self.Mixed_6c = InceptionC(768, channels_7x7=160)
self.Mixed_6d = InceptionC(768, channels_7x7=160)
self.Mixed_6e = InceptionC(768, channels_7x7=192)
self.Mixed_7a = InceptionD(768)
self.Mixed_7b = InceptionE(1280)
self.Mixed_7c = InceptionE(2048)
self.fc = nn.Linear(2048, num_classes)
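    # set_params copies pretrained weights and biases from a nested dictionary
    # (keys such as 'layer1', 'layer2_1', ...) into the corresponding conv and
    # fc parameters. Note that the parameter name `dict` shadows the builtin.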
def set_params(self, dict):
self.Conv2d_1a_3x3.conv.weight = nn.Parameter(torch.FloatTensor(
dict['layer1']['c_1_w']))
self.Conv2d_1a_3x3.conv.bias = nn.Parameter(torch.FloatTensor(dict[
'layer1']['c_1_b']))
self.Conv2d_2a_3x3.conv.weight = nn.Parameter(torch.FloatTensor(
dict['layer1']['c_2_w']))
self.Conv2d_2a_3x3.conv.bias = nn.Parameter(torch.FloatTensor(dict[
'layer1']['c_2_b']))
self.Conv2d_2b_3x3.conv.weight = nn.Parameter(torch.FloatTensor(
dict['layer1']['c_3_w']))
self.Conv2d_2b_3x3.conv.bias = nn.Parameter(torch.FloatTensor(dict[
'layer1']['c_3_b']))
self.Conv2d_3b_1x1.conv.weight = nn.Parameter(torch.FloatTensor(
dict['layer1']['c_4_w']))
self.Conv2d_3b_1x1.conv.bias = nn.Parameter(torch.FloatTensor(dict[
'layer1']['c_4_b']))
self.Conv2d_4a_3x3.conv.weight = nn.Parameter(torch.FloatTensor(
dict['layer1']['c_5_w']))
self.Conv2d_4a_3x3.conv.bias = nn.Parameter(torch.FloatTensor(dict[
'layer1']['c_5_b']))
self.Mixed_5b.branch1x1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_1']['way1_w']))
self.Mixed_5b.branch1x1.conv.bias = nn.Parameter(torch.FloatTensor(
dict['layer2_1']['way1_b']))
self.Mixed_5b.branch5x5_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_1']['way2_1_w']))
self.Mixed_5b.branch5x5_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_1']['way2_1_b']))
self.Mixed_5b.branch5x5_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_1']['way2_2_w']))
self.Mixed_5b.branch5x5_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_1']['way2_2_b']))
self.Mixed_5b.branch3x3dbl_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_1']['way3_1_w']))
self.Mixed_5b.branch3x3dbl_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_1']['way3_1_b']))
self.Mixed_5b.branch3x3dbl_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_1']['way3_2_w']))
self.Mixed_5b.branch3x3dbl_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_1']['way3_2_b']))
self.Mixed_5b.branch3x3dbl_3.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_1']['way3_3_w']))
self.Mixed_5b.branch3x3dbl_3.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_1']['way3_3_b']))
self.Mixed_5b.branch_pool.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_1']['way4_w']))
self.Mixed_5b.branch_pool.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_1']['way4_b']))
self.Mixed_5c.branch1x1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_2']['way1_w']))
self.Mixed_5c.branch1x1.conv.bias = nn.Parameter(torch.FloatTensor(
dict['layer2_2']['way1_b']))
self.Mixed_5c.branch5x5_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_2']['way2_1_w']))
self.Mixed_5c.branch5x5_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_2']['way2_1_b']))
self.Mixed_5c.branch5x5_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_2']['way2_2_w']))
self.Mixed_5c.branch5x5_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_2']['way2_2_b']))
self.Mixed_5c.branch3x3dbl_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_2']['way3_1_w']))
self.Mixed_5c.branch3x3dbl_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_2']['way3_1_b']))
self.Mixed_5c.branch3x3dbl_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_2']['way3_2_w']))
self.Mixed_5c.branch3x3dbl_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_2']['way3_2_b']))
self.Mixed_5c.branch3x3dbl_3.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_2']['way3_3_w']))
self.Mixed_5c.branch3x3dbl_3.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_2']['way3_3_b']))
self.Mixed_5c.branch_pool.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_2']['way4_w']))
self.Mixed_5c.branch_pool.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_2']['way4_b']))
self.Mixed_5d.branch1x1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_3']['way1_w']))
self.Mixed_5d.branch1x1.conv.bias = nn.Parameter(torch.FloatTensor(
dict['layer2_3']['way1_b']))
self.Mixed_5d.branch5x5_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_3']['way2_1_w']))
self.Mixed_5d.branch5x5_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_3']['way2_1_b']))
self.Mixed_5d.branch5x5_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_3']['way2_2_w']))
self.Mixed_5d.branch5x5_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_3']['way2_2_b']))
self.Mixed_5d.branch3x3dbl_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_3']['way3_1_w']))
self.Mixed_5d.branch3x3dbl_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_3']['way3_1_b']))
self.Mixed_5d.branch3x3dbl_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_3']['way3_2_w']))
self.Mixed_5d.branch3x3dbl_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_3']['way3_2_b']))
self.Mixed_5d.branch3x3dbl_3.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_3']['way3_3_w']))
self.Mixed_5d.branch3x3dbl_3.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_3']['way3_3_b']))
self.Mixed_5d.branch_pool.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer2_3']['way4_w']))
self.Mixed_5d.branch_pool.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer2_3']['way4_b']))
self.Mixed_6a.branch3x3.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer3']['way1_w']))
self.Mixed_6a.branch3x3.conv.bias = nn.Parameter(torch.FloatTensor(
dict['layer3']['way1_b']))
self.Mixed_6a.branch3x3dbl_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer3']['way2_1_w']))
self.Mixed_6a.branch3x3dbl_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer3']['way2_1_b']))
self.Mixed_6a.branch3x3dbl_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer3']['way2_2_w']))
self.Mixed_6a.branch3x3dbl_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer3']['way2_2_b']))
self.Mixed_6a.branch3x3dbl_3.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer3']['way2_3_w']))
self.Mixed_6a.branch3x3dbl_3.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer3']['way2_3_b']))
self.Mixed_6b.branch1x1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way1_w']))
self.Mixed_6b.branch1x1.conv.bias = nn.Parameter(torch.FloatTensor(
dict['layer4_1']['way1_b']))
self.Mixed_6b.branch7x7_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way2_1_w']))
self.Mixed_6b.branch7x7_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way2_1_b']))
self.Mixed_6b.branch7x7_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way2_2_w']))
self.Mixed_6b.branch7x7_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way2_2_b']))
self.Mixed_6b.branch7x7_3.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way2_3_w']))
self.Mixed_6b.branch7x7_3.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way2_3_b']))
self.Mixed_6b.branch7x7dbl_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way3_1_w']))
self.Mixed_6b.branch7x7dbl_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way3_1_b']))
self.Mixed_6b.branch7x7dbl_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way3_2_w']))
self.Mixed_6b.branch7x7dbl_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way3_2_b']))
self.Mixed_6b.branch7x7dbl_3.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way3_3_w']))
self.Mixed_6b.branch7x7dbl_3.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way3_3_b']))
self.Mixed_6b.branch7x7dbl_4.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way3_4_w']))
self.Mixed_6b.branch7x7dbl_4.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way3_4_b']))
self.Mixed_6b.branch7x7dbl_5.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way3_5_w']))
self.Mixed_6b.branch7x7dbl_5.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way3_5_b']))
self.Mixed_6b.branch_pool.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way4_w']))
self.Mixed_6b.branch_pool.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_1']['way4_b']))
self.Mixed_6c.branch1x1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way1_w']))
self.Mixed_6c.branch1x1.conv.bias = nn.Parameter(torch.FloatTensor(
dict['layer4_2']['way1_b']))
self.Mixed_6c.branch7x7_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way2_1_w']))
self.Mixed_6c.branch7x7_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way2_1_b']))
self.Mixed_6c.branch7x7_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way2_2_w']))
self.Mixed_6c.branch7x7_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way2_2_b']))
self.Mixed_6c.branch7x7_3.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way2_3_w']))
self.Mixed_6c.branch7x7_3.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way2_3_b']))
self.Mixed_6c.branch7x7dbl_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way3_1_w']))
self.Mixed_6c.branch7x7dbl_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way3_1_b']))
self.Mixed_6c.branch7x7dbl_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way3_2_w']))
self.Mixed_6c.branch7x7dbl_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way3_2_b']))
self.Mixed_6c.branch7x7dbl_3.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way3_3_w']))
self.Mixed_6c.branch7x7dbl_3.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way3_3_b']))
self.Mixed_6c.branch7x7dbl_4.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way3_4_w']))
self.Mixed_6c.branch7x7dbl_4.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way3_4_b']))
self.Mixed_6c.branch7x7dbl_5.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way3_5_w']))
self.Mixed_6c.branch7x7dbl_5.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way3_5_b']))
self.Mixed_6c.branch_pool.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way4_w']))
self.Mixed_6c.branch_pool.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_2']['way4_b']))
self.Mixed_6d.branch1x1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way1_w']))
self.Mixed_6d.branch1x1.conv.bias = nn.Parameter(torch.FloatTensor(
dict['layer4_3']['way1_b']))
self.Mixed_6d.branch7x7_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way2_1_w']))
self.Mixed_6d.branch7x7_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way2_1_b']))
self.Mixed_6d.branch7x7_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way2_2_w']))
self.Mixed_6d.branch7x7_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way2_2_b']))
self.Mixed_6d.branch7x7_3.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way2_3_w']))
self.Mixed_6d.branch7x7_3.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way2_3_b']))
self.Mixed_6d.branch7x7dbl_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way3_1_w']))
self.Mixed_6d.branch7x7dbl_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way3_1_b']))
self.Mixed_6d.branch7x7dbl_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way3_2_w']))
self.Mixed_6d.branch7x7dbl_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way3_2_b']))
self.Mixed_6d.branch7x7dbl_3.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way3_3_w']))
self.Mixed_6d.branch7x7dbl_3.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way3_3_b']))
self.Mixed_6d.branch7x7dbl_4.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way3_4_w']))
self.Mixed_6d.branch7x7dbl_4.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way3_4_b']))
self.Mixed_6d.branch7x7dbl_5.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way3_5_w']))
self.Mixed_6d.branch7x7dbl_5.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way3_5_b']))
self.Mixed_6d.branch_pool.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way4_w']))
self.Mixed_6d.branch_pool.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_3']['way4_b']))
self.Mixed_6e.branch1x1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way1_w']))
self.Mixed_6e.branch1x1.conv.bias = nn.Parameter(torch.FloatTensor(
dict['layer4_4']['way1_b']))
self.Mixed_6e.branch7x7_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way2_1_w']))
self.Mixed_6e.branch7x7_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way2_1_b']))
self.Mixed_6e.branch7x7_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way2_2_w']))
self.Mixed_6e.branch7x7_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way2_2_b']))
self.Mixed_6e.branch7x7_3.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way2_3_w']))
self.Mixed_6e.branch7x7_3.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way2_3_b']))
self.Mixed_6e.branch7x7dbl_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way3_1_w']))
self.Mixed_6e.branch7x7dbl_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way3_1_b']))
self.Mixed_6e.branch7x7dbl_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way3_2_w']))
self.Mixed_6e.branch7x7dbl_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way3_2_b']))
self.Mixed_6e.branch7x7dbl_3.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way3_3_w']))
self.Mixed_6e.branch7x7dbl_3.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way3_3_b']))
self.Mixed_6e.branch7x7dbl_4.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way3_4_w']))
self.Mixed_6e.branch7x7dbl_4.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way3_4_b']))
self.Mixed_6e.branch7x7dbl_5.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way3_5_w']))
self.Mixed_6e.branch7x7dbl_5.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way3_5_b']))
self.Mixed_6e.branch_pool.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way4_w']))
self.Mixed_6e.branch_pool.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer4_4']['way4_b']))
self.Mixed_7a.branch3x3_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer5']['way1_1_w']))
self.Mixed_7a.branch3x3_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer5']['way1_1_b']))
self.Mixed_7a.branch3x3_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer5']['way1_2_w']))
self.Mixed_7a.branch3x3_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer5']['way1_2_b']))
self.Mixed_7a.branch7x7x3_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer5']['way2_1_w']))
self.Mixed_7a.branch7x7x3_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer5']['way2_1_b']))
self.Mixed_7a.branch7x7x3_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer5']['way2_2_w']))
self.Mixed_7a.branch7x7x3_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer5']['way2_2_b']))
self.Mixed_7a.branch7x7x3_3.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer5']['way2_3_w']))
self.Mixed_7a.branch7x7x3_3.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer5']['way2_3_b']))
self.Mixed_7a.branch7x7x3_4.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer5']['way2_4_w']))
self.Mixed_7a.branch7x7x3_4.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer5']['way2_4_b']))
self.Mixed_7b.branch1x1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_1']['way1_w']))
self.Mixed_7b.branch1x1.conv.bias = nn.Parameter(torch.FloatTensor(
dict['layer6_1']['way1_b']))
self.Mixed_7b.branch3x3_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_1']['way23_1_w']))
self.Mixed_7b.branch3x3_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer6_1']['way23_1_b']))
self.Mixed_7b.branch3x3_2a.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_1']['way2_2_w']))
self.Mixed_7b.branch3x3_2a.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer6_1']['way2_2_b']))
self.Mixed_7b.branch3x3_2b.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_1']['way3_2_w']))
self.Mixed_7b.branch3x3_2b.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer6_1']['way3_2_b']))
self.Mixed_7b.branch3x3dbl_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_1']['way45_1_w']))
self.Mixed_7b.branch3x3dbl_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer6_1']['way45_1_b']))
self.Mixed_7b.branch3x3dbl_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_1']['way45_2_w']))
self.Mixed_7b.branch3x3dbl_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer6_1']['way45_2_b']))
self.Mixed_7b.branch3x3dbl_3a.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_1']['way4_3_w']))
self.Mixed_7b.branch3x3dbl_3a.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer6_1']['way4_3_b']))
self.Mixed_7b.branch3x3dbl_3b.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_1']['way5_3_w']))
self.Mixed_7b.branch3x3dbl_3b.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer6_1']['way5_3_b']))
self.Mixed_7b.branch_pool.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_1']['way6_w']))
self.Mixed_7b.branch_pool.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer6_1']['way6_b']))
self.Mixed_7c.branch1x1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_2']['way1_w']))
self.Mixed_7c.branch1x1.conv.bias = nn.Parameter(torch.FloatTensor(
dict['layer6_2']['way1_b']))
self.Mixed_7c.branch3x3_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_2']['way23_1_w']))
self.Mixed_7c.branch3x3_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer6_2']['way23_1_b']))
self.Mixed_7c.branch3x3_2a.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_2']['way2_2_w']))
self.Mixed_7c.branch3x3_2a.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer6_2']['way2_2_b']))
self.Mixed_7c.branch3x3_2b.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_2']['way3_2_w']))
self.Mixed_7c.branch3x3_2b.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer6_2']['way3_2_b']))
self.Mixed_7c.branch3x3dbl_1.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_2']['way45_1_w']))
self.Mixed_7c.branch3x3dbl_1.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer6_2']['way45_1_b']))
self.Mixed_7c.branch3x3dbl_2.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_2']['way45_2_w']))
self.Mixed_7c.branch3x3dbl_2.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer6_2']['way45_2_b']))
self.Mixed_7c.branch3x3dbl_3a.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_2']['way4_3_w']))
self.Mixed_7c.branch3x3dbl_3a.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer6_2']['way4_3_b']))
self.Mixed_7c.branch3x3dbl_3b.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_2']['way5_3_w']))
self.Mixed_7c.branch3x3dbl_3b.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer6_2']['way5_3_b']))
self.Mixed_7c.branch_pool.conv.weight = nn.Parameter(torch.
FloatTensor(dict['layer6_2']['way6_w']))
self.Mixed_7c.branch_pool.conv.bias = nn.Parameter(torch.
FloatTensor(dict['layer6_2']['way6_b']))
self.fc.weight = nn.Parameter(torch.FloatTensor(dict['outputlayer']
['fc_w']))
self.fc.bias = nn.Parameter(torch.FloatTensor(dict['outputlayer'][
'fc_b']))
def forward(self, input_0):
primals_2 = self.Conv2d_1a_3x3.conv.weight
primals_3 = self.Conv2d_1a_3x3.conv.bias
primals_4 = self.Conv2d_2a_3x3.conv.weight
primals_5 = self.Conv2d_2a_3x3.conv.bias
primals_6 = self.Conv2d_2b_3x3.conv.weight
primals_7 = self.Conv2d_2b_3x3.conv.bias
primals_8 = self.Conv2d_3b_1x1.conv.weight
primals_9 = self.Conv2d_3b_1x1.conv.bias
primals_10 = self.Conv2d_4a_3x3.conv.weight
primals_11 = self.Conv2d_4a_3x3.conv.bias
primals_12 = self.Mixed_5b.branch1x1.conv.weight
primals_13 = self.Mixed_5b.branch1x1.conv.bias
primals_14 = self.Mixed_5b.branch5x5_1.conv.weight
primals_15 = self.Mixed_5b.branch5x5_1.conv.bias
primals_16 = self.Mixed_5b.branch5x5_2.conv.weight
primals_17 = self.Mixed_5b.branch5x5_2.conv.bias
primals_18 = self.Mixed_5b.branch3x3dbl_1.conv.weight
primals_19 = self.Mixed_5b.branch3x3dbl_1.conv.bias
primals_20 = self.Mixed_5b.branch3x3dbl_2.conv.weight
primals_21 = self.Mixed_5b.branch3x3dbl_2.conv.bias
primals_22 = self.Mixed_5b.branch3x3dbl_3.conv.weight
primals_23 = self.Mixed_5b.branch3x3dbl_3.conv.bias
primals_24 = self.Mixed_5b.branch_pool.conv.weight
primals_25 = self.Mixed_5b.branch_pool.conv.bias
primals_26 = self.Mixed_5c.branch1x1.conv.weight
primals_27 = self.Mixed_5c.branch1x1.conv.bias
primals_28 = self.Mixed_5c.branch5x5_1.conv.weight
primals_29 = self.Mixed_5c.branch5x5_1.conv.bias
primals_30 = self.Mixed_5c.branch5x5_2.conv.weight
primals_31 = self.Mixed_5c.branch5x5_2.conv.bias
primals_32 = self.Mixed_5c.branch3x3dbl_1.conv.weight
primals_33 = self.Mixed_5c.branch3x3dbl_1.conv.bias
primals_34 = self.Mixed_5c.branch3x3dbl_2.conv.weight
primals_35 = self.Mixed_5c.branch3x3dbl_2.conv.bias
primals_36 = self.Mixed_5c.branch3x3dbl_3.conv.weight
primals_37 = self.Mixed_5c.branch3x3dbl_3.conv.bias
primals_38 = self.Mixed_5c.branch_pool.conv.weight
primals_39 = self.Mixed_5c.branch_pool.conv.bias
primals_40 = self.Mixed_5d.branch1x1.conv.weight
primals_41 = self.Mixed_5d.branch1x1.conv.bias
primals_42 = self.Mixed_5d.branch5x5_1.conv.weight
primals_43 = self.Mixed_5d.branch5x5_1.conv.bias
primals_44 = self.Mixed_5d.branch5x5_2.conv.weight
primals_45 = self.Mixed_5d.branch5x5_2.conv.bias
primals_46 = self.Mixed_5d.branch3x3dbl_1.conv.weight
primals_47 = self.Mixed_5d.branch3x3dbl_1.conv.bias
primals_48 = self.Mixed_5d.branch3x3dbl_2.conv.weight
primals_49 = self.Mixed_5d.branch3x3dbl_2.conv.bias
primals_50 = self.Mixed_5d.branch3x3dbl_3.conv.weight
primals_51 = self.Mixed_5d.branch3x3dbl_3.conv.bias
primals_52 = self.Mixed_5d.branch_pool.conv.weight
primals_53 = self.Mixed_5d.branch_pool.conv.bias
primals_54 = self.Mixed_6a.branch3x3.conv.weight
primals_55 = self.Mixed_6a.branch3x3.conv.bias
primals_56 = self.Mixed_6a.branch3x3dbl_1.conv.weight
primals_57 = self.Mixed_6a.branch3x3dbl_1.conv.bias
primals_58 = self.Mixed_6a.branch3x3dbl_2.conv.weight
primals_59 = self.Mixed_6a.branch3x3dbl_2.conv.bias
primals_60 = self.Mixed_6a.branch3x3dbl_3.conv.weight
primals_61 = self.Mixed_6a.branch3x3dbl_3.conv.bias
primals_62 = self.Mixed_6b.branch1x1.conv.weight
primals_63 = self.Mixed_6b.branch1x1.conv.bias
primals_64 = self.Mixed_6b.branch7x7_1.conv.weight
primals_65 = self.Mixed_6b.branch7x7_1.conv.bias
primals_66 = self.Mixed_6b.branch7x7_2.conv.weight
primals_67 = self.Mixed_6b.branch7x7_2.conv.bias
primals_68 = self.Mixed_6b.branch7x7_3.conv.weight
primals_69 = self.Mixed_6b.branch7x7_3.conv.bias
primals_70 = self.Mixed_6b.branch7x7dbl_1.conv.weight
primals_71 = self.Mixed_6b.branch7x7dbl_1.conv.bias
primals_72 = self.Mixed_6b.branch7x7dbl_2.conv.weight
primals_73 = self.Mixed_6b.branch7x7dbl_2.conv.bias
primals_74 = self.Mixed_6b.branch7x7dbl_3.conv.weight
primals_75 = self.Mixed_6b.branch7x7dbl_3.conv.bias
primals_76 = self.Mixed_6b.branch7x7dbl_4.conv.weight
primals_77 = self.Mixed_6b.branch7x7dbl_4.conv.bias
primals_78 = self.Mixed_6b.branch7x7dbl_5.conv.weight
primals_79 = self.Mixed_6b.branch7x7dbl_5.conv.bias
primals_80 = self.Mixed_6b.branch_pool.conv.weight
primals_81 = self.Mixed_6b.branch_pool.conv.bias
primals_82 = self.Mixed_6c.branch1x1.conv.weight
primals_83 = self.Mixed_6c.branch1x1.conv.bias
primals_84 = self.Mixed_6c.branch7x7_1.conv.weight
primals_85 = self.Mixed_6c.branch7x7_1.conv.bias
primals_86 = self.Mixed_6c.branch7x7_2.conv.weight
primals_87 = self.Mixed_6c.branch7x7_2.conv.bias
primals_88 = self.Mixed_6c.branch7x7_3.conv.weight
primals_89 = self.Mixed_6c.branch7x7_3.conv.bias
primals_90 = self.Mixed_6c.branch7x7dbl_1.conv.weight
primals_91 = self.Mixed_6c.branch7x7dbl_1.conv.bias
primals_92 = self.Mixed_6c.branch7x7dbl_2.conv.weight
primals_93 = self.Mixed_6c.branch7x7dbl_2.conv.bias
primals_94 = self.Mixed_6c.branch7x7dbl_3.conv.weight
primals_95 = self.Mixed_6c.branch7x7dbl_3.conv.bias
primals_96 = self.Mixed_6c.branch7x7dbl_4.conv.weight
primals_97 = self.Mixed_6c.branch7x7dbl_4.conv.bias
primals_98 = self.Mixed_6c.branch7x7dbl_5.conv.weight
primals_99 = self.Mixed_6c.branch7x7dbl_5.conv.bias
primals_100 = self.Mixed_6c.branch_pool.conv.weight
primals_101 = self.Mixed_6c.branch_pool.conv.bias
primals_102 = self.Mixed_6d.branch1x1.conv.weight
primals_103 = self.Mixed_6d.branch1x1.conv.bias
primals_104 = self.Mixed_6d.branch7x7_1.conv.weight
primals_105 = self.Mixed_6d.branch7x7_1.conv.bias
primals_106 = self.Mixed_6d.branch7x7_2.conv.weight
primals_107 = self.Mixed_6d.branch7x7_2.conv.bias
primals_108 = self.Mixed_6d.branch7x7_3.conv.weight
primals_109 = self.Mixed_6d.branch7x7_3.conv.bias
primals_110 = self.Mixed_6d.branch7x7dbl_1.conv.weight
primals_111 = self.Mixed_6d.branch7x7dbl_1.conv.bias
primals_112 = self.Mixed_6d.branch7x7dbl_2.conv.weight
primals_113 = self.Mixed_6d.branch7x7dbl_2.conv.bias
primals_114 = self.Mixed_6d.branch7x7dbl_3.conv.weight
primals_115 = self.Mixed_6d.branch7x7dbl_3.conv.bias
primals_116 = self.Mixed_6d.branch7x7dbl_4.conv.weight
primals_117 = self.Mixed_6d.branch7x7dbl_4.conv.bias
primals_118 = self.Mixed_6d.branch7x7dbl_5.conv.weight
primals_119 = self.Mixed_6d.branch7x7dbl_5.conv.bias
primals_120 = self.Mixed_6d.branch_pool.conv.weight
primals_121 = self.Mixed_6d.branch_pool.conv.bias
primals_122 = self.Mixed_6e.branch1x1.conv.weight
primals_123 = self.Mixed_6e.branch1x1.conv.bias
primals_124 = self.Mixed_6e.branch7x7_1.conv.weight
primals_125 = self.Mixed_6e.branch7x7_1.conv.bias
primals_126 = self.Mixed_6e.branch7x7_2.conv.weight
primals_127 = self.Mixed_6e.branch7x7_2.conv.bias
primals_128 = self.Mixed_6e.branch7x7_3.conv.weight
primals_129 = self.Mixed_6e.branch7x7_3.conv.bias
primals_130 = self.Mixed_6e.branch7x7dbl_1.conv.weight
primals_131 = self.Mixed_6e.branch7x7dbl_1.conv.bias
primals_132 = self.Mixed_6e.branch7x7dbl_2.conv.weight
primals_133 = self.Mixed_6e.branch7x7dbl_2.conv.bias
primals_134 = self.Mixed_6e.branch7x7dbl_3.conv.weight
primals_135 = self.Mixed_6e.branch7x7dbl_3.conv.bias
primals_136 = self.Mixed_6e.branch7x7dbl_4.conv.weight
primals_137 = self.Mixed_6e.branch7x7dbl_4.conv.bias
primals_138 = self.Mixed_6e.branch7x7dbl_5.conv.weight
primals_139 = self.Mixed_6e.branch7x7dbl_5.conv.bias
primals_140 = self.Mixed_6e.branch_pool.conv.weight
primals_141 = self.Mixed_6e.branch_pool.conv.bias
primals_142 = self.Mixed_7a.branch3x3_1.conv.weight
primals_143 = self.Mixed_7a.branch3x3_1.conv.bias
primals_144 = self.Mixed_7a.branch3x3_2.conv.weight
primals_145 = self.Mixed_7a.branch3x3_2.conv.bias
primals_146 = self.Mixed_7a.branch7x7x3_1.conv.weight
primals_147 = self.Mixed_7a.branch7x7x3_1.conv.bias
primals_148 = self.Mixed_7a.branch7x7x3_2.conv.weight
primals_149 = self.Mixed_7a.branch7x7x3_2.conv.bias
primals_150 = self.Mixed_7a.branch7x7x3_3.conv.weight
primals_151 = self.Mixed_7a.branch7x7x3_3.conv.bias
primals_152 = self.Mixed_7a.branch7x7x3_4.conv.weight
primals_153 = self.Mixed_7a.branch7x7x3_4.conv.bias
primals_154 = self.Mixed_7b.branch1x1.conv.weight
primals_155 = self.Mixed_7b.branch1x1.conv.bias
primals_156 = self.Mixed_7b.branch3x3_1.conv.weight
primals_157 = self.Mixed_7b.branch3x3_1.conv.bias
primals_158 = self.Mixed_7b.branch3x3_2a.conv.weight
primals_159 = self.Mixed_7b.branch3x3_2a.conv.bias
primals_160 = self.Mixed_7b.branch3x3_2b.conv.weight
primals_161 = self.Mixed_7b.branch3x3_2b.conv.bias
primals_162 = self.Mixed_7b.branch3x3dbl_1.conv.weight
primals_163 = self.Mixed_7b.branch3x3dbl_1.conv.bias
primals_164 = self.Mixed_7b.branch3x3dbl_2.conv.weight
primals_165 = self.Mixed_7b.branch3x3dbl_2.conv.bias
primals_166 = self.Mixed_7b.branch3x3dbl_3a.conv.weight
primals_167 = self.Mixed_7b.branch3x3dbl_3a.conv.bias
primals_168 = self.Mixed_7b.branch3x3dbl_3b.conv.weight
primals_169 = self.Mixed_7b.branch3x3dbl_3b.conv.bias
primals_170 = self.Mixed_7b.branch_pool.conv.weight
primals_171 = self.Mixed_7b.branch_pool.conv.bias
primals_172 = self.Mixed_7c.branch1x1.conv.weight
primals_173 = self.Mixed_7c.branch1x1.conv.bias
primals_174 = self.Mixed_7c.branch3x3_1.conv.weight
primals_175 = self.Mixed_7c.branch3x3_1.conv.bias
primals_176 = self.Mixed_7c.branch3x3_2a.conv.weight
primals_177 = self.Mixed_7c.branch3x3_2a.conv.bias
primals_178 = self.Mixed_7c.branch3x3_2b.conv.weight
primals_179 = self.Mixed_7c.branch3x3_2b.conv.bias
primals_180 = self.Mixed_7c.branch3x3dbl_1.conv.weight
primals_181 = self.Mixed_7c.branch3x3dbl_1.conv.bias
primals_182 = self.Mixed_7c.branch3x3dbl_2.conv.weight
primals_183 = self.Mixed_7c.branch3x3dbl_2.conv.bias
primals_184 = self.Mixed_7c.branch3x3dbl_3a.conv.weight
primals_185 = self.Mixed_7c.branch3x3dbl_3a.conv.bias
primals_186 = self.Mixed_7c.branch3x3dbl_3b.conv.weight
primals_187 = self.Mixed_7c.branch3x3dbl_3b.conv.bias
primals_188 = self.Mixed_7c.branch_pool.conv.weight
primals_189 = self.Mixed_7c.branch_pool.conv.bias
primals_190 = self.fc.weight
primals_191 = self.fc.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45, primals_46, primals_47, primals_48, primals_49,
primals_50, primals_51, primals_52, primals_53, primals_54,
primals_55, primals_56, primals_57, primals_58, primals_59,
primals_60, primals_61, primals_62, primals_63, primals_64,
primals_65, primals_66, primals_67, primals_68, primals_69,
primals_70, primals_71, primals_72, primals_73, primals_74,
primals_75, primals_76, primals_77, primals_78, primals_79,
primals_80, primals_81, primals_82, primals_83, primals_84,
primals_85, primals_86, primals_87, primals_88, primals_89,
primals_90, primals_91, primals_92, primals_93, primals_94,
primals_95, primals_96, primals_97, primals_98, primals_99,
primals_100, primals_101, primals_102, primals_103, primals_104,
primals_105, primals_106, primals_107, primals_108, primals_109,
primals_110, primals_111, primals_112, primals_113, primals_114,
primals_115, primals_116, primals_117, primals_118, primals_119,
primals_120, primals_121, primals_122, primals_123, primals_124,
primals_125, primals_126, primals_127, primals_128, primals_129,
primals_130, primals_131, primals_132, primals_133, primals_134,
primals_135, primals_136, primals_137, primals_138, primals_139,
primals_140, primals_141, primals_142, primals_143, primals_144,
primals_145, primals_146, primals_147, primals_148, primals_149,
primals_150, primals_151, primals_152, primals_153, primals_154,
primals_155, primals_156, primals_157, primals_158, primals_159,
primals_160, primals_161, primals_162, primals_163, primals_164,
primals_165, primals_166, primals_167, primals_168, primals_169,
primals_170, primals_171, primals_172, primals_173, primals_174,
primals_175, primals_176, primals_177, primals_178, primals_179,
primals_180, primals_181, primals_182, primals_183, primals_184,
primals_185, primals_186, primals_187, primals_188, primals_189,
primals_190, primals_191])
return output[0]
|
Galaxies99/inception-cuda
|
Inception3
| false
| 11,638
|
[
"MIT"
] | 0
|
ed8fdbe3caef415e60b52e671273be90e9423e44
|
https://github.com/Galaxies99/inception-cuda/tree/ed8fdbe3caef415e60b52e671273be90e9423e44
|
MLP
|
import torch
import torch.nn as nn
class SharedDropout(nn.Module):
"""
SharedDropout differs from the vanilla dropout strategy in that
the dropout mask is shared across one dimension.
Args:
p (float):
The probability of an element to be zeroed. Default: 0.5.
batch_first (bool):
If ``True``, the input and output tensors are provided as ``[batch_size, seq_len, *]``.
Default: ``True``.
Examples:
>>> x = torch.ones(1, 3, 5)
>>> nn.Dropout()(x)
tensor([[[0., 2., 2., 0., 0.],
[2., 2., 0., 2., 2.],
[2., 2., 2., 2., 0.]]])
>>> SharedDropout()(x)
tensor([[[2., 0., 2., 0., 2.],
[2., 0., 2., 0., 2.],
[2., 0., 2., 0., 2.]]])
"""
def __init__(self, p=0.5, batch_first=True):
super().__init__()
self.p = p
self.batch_first = batch_first
def __repr__(self):
s = f'p={self.p}'
if self.batch_first:
s += f', batch_first={self.batch_first}'
return f'{self.__class__.__name__}({s})'
def forward(self, x):
"""
Args:
x (~torch.Tensor):
A tensor of any shape.
Returns:
The returned tensor is of the same shape as `x`.
"""
if self.training:
if self.batch_first:
mask = self.get_mask(x[:, 0], self.p).unsqueeze(1)
else:
mask = self.get_mask(x[0], self.p)
x = x * mask
return x
@staticmethod
def get_mask(x, p):
return x.new_empty(x.shape).bernoulli_(1 - p) / (1 - p)
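# Editor's note (not part of the original SuPar source): the mask is a Bernoulli(1 - p)
# draw rescaled by 1 / (1 - p), so surviving entries are scaled up and the expectation of
# x * mask stays equal to x; with p=0.5 kept positions become 2.0, matching the docstring
# example above.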
class MLP(nn.Module):
"""
Applies a linear transformation together with a non-linear activation to the incoming tensor:
:math:`y = \\mathrm{Activation}(x A^T + b)`
Args:
        n_in (int):
            The size of each input feature.
        n_out (int):
            The size of each output feature.
dropout (float):
If non-zero, introduce a :class:`SharedDropout` layer on the output with this dropout ratio. Default: 0.
activation (bool):
Whether to use activations. Default: True.
"""
def __init__(self, n_in, n_out, dropout=0, activation=True):
super().__init__()
self.n_in = n_in
self.n_out = n_out
self.linear = nn.Linear(n_in, n_out)
self.activation = nn.LeakyReLU(negative_slope=0.1
) if activation else nn.Identity()
self.dropout = SharedDropout(p=dropout)
self.reset_parameters()
def __repr__(self):
s = f'n_in={self.n_in}, n_out={self.n_out}'
if self.dropout.p > 0:
s += f', dropout={self.dropout.p}'
return f'{self.__class__.__name__}({s})'
def reset_parameters(self):
nn.init.orthogonal_(self.linear.weight)
nn.init.zeros_(self.linear.bias)
def forward(self, x):
"""
Args:
            x (~torch.Tensor):
                A tensor whose last dimension has size `n_in`.
        Returns:
            A tensor of the same shape as `x`, except that the last dimension has size `n_out`.
"""
x = self.linear(x)
x = self.activation(x)
x = self.dropout(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_in': 4, 'n_out': 4}]
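# Hedged usage sketch (not part of the original SuPar source): it applies the MLP to the
# tensor shape produced by get_inputs() above; the dropout value is an assumption chosen
# only for illustration.
if __name__ == '__main__':
    _mlp = MLP(n_in=4, n_out=4, dropout=0.1)
    _x = torch.rand([4, 4, 4, 4])
    _y = _mlp(_x)        # linear -> LeakyReLU(0.1) -> SharedDropout
    print(_y.shape)      # torch.Size([4, 4, 4, 4]); the last dimension has size n_out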
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
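# Editor's note (not part of the generated kernel): this kernel fuses the bias add with
# LeakyReLU(0.1); out_ptr0 receives the boolean mask (x + b > 0) saved for the backward
# pass and out_ptr1 the activated values. A rough PyTorch equivalent (shapes assumed from
# call() below) would be:
#     y = buf0 + bias                      # (64, 4) + (4,)
#     mask = y > 0
#     out = torch.where(mask, y, 0.1 * y)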
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(256)](buf0, primals_2, buf1,
buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1
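# Editor's note (not part of the generated code): call() is equivalent to
# x.view(64, 4) @ weight.t() followed by the fused bias + LeakyReLU kernel; buf2 holds the
# activation output, buf1 the saved sign mask, and the reinterpreted input is returned for
# reuse in the backward pass.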
class SharedDropout(nn.Module):
"""
SharedDropout differs from the vanilla dropout strategy in that
the dropout mask is shared across one dimension.
Args:
p (float):
The probability of an element to be zeroed. Default: 0.5.
batch_first (bool):
If ``True``, the input and output tensors are provided as ``[batch_size, seq_len, *]``.
Default: ``True``.
Examples:
>>> x = torch.ones(1, 3, 5)
>>> nn.Dropout()(x)
tensor([[[0., 2., 2., 0., 0.],
[2., 2., 0., 2., 2.],
[2., 2., 2., 2., 0.]]])
>>> SharedDropout()(x)
tensor([[[2., 0., 2., 0., 2.],
[2., 0., 2., 0., 2.],
[2., 0., 2., 0., 2.]]])
"""
def __init__(self, p=0.5, batch_first=True):
super().__init__()
self.p = p
self.batch_first = batch_first
def __repr__(self):
s = f'p={self.p}'
if self.batch_first:
s += f', batch_first={self.batch_first}'
return f'{self.__class__.__name__}({s})'
def forward(self, x):
"""
Args:
x (~torch.Tensor):
A tensor of any shape.
Returns:
The returned tensor is of the same shape as `x`.
"""
if self.training:
if self.batch_first:
mask = self.get_mask(x[:, 0], self.p).unsqueeze(1)
else:
mask = self.get_mask(x[0], self.p)
x = x * mask
return x
@staticmethod
def get_mask(x, p):
return x.new_empty(x.shape).bernoulli_(1 - p) / (1 - p)
class MLPNew(nn.Module):
"""
Applies a linear transformation together with a non-linear activation to the incoming tensor:
:math:`y = \\mathrm{Activation}(x A^T + b)`
Args:
        n_in (int):
            The size of each input feature.
        n_out (int):
            The size of each output feature.
dropout (float):
If non-zero, introduce a :class:`SharedDropout` layer on the output with this dropout ratio. Default: 0.
activation (bool):
Whether to use activations. Default: True.
"""
def __init__(self, n_in, n_out, dropout=0, activation=True):
super().__init__()
self.n_in = n_in
self.n_out = n_out
self.linear = nn.Linear(n_in, n_out)
self.activation = nn.LeakyReLU(negative_slope=0.1
) if activation else nn.Identity()
self.dropout = SharedDropout(p=dropout)
self.reset_parameters()
def __repr__(self):
s = f'n_in={self.n_in}, n_out={self.n_out}'
if self.dropout.p > 0:
s += f', dropout={self.dropout.p}'
return f'{self.__class__.__name__}({s})'
def reset_parameters(self):
nn.init.orthogonal_(self.linear.weight)
nn.init.zeros_(self.linear.bias)
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
KoichiYasuoka/SuPar
|
MLP
| false
| 11,639
|
[
"MIT"
] | 0
|
9bcf10fb946cae75b6a311d4cd19bec5bb1a9487
|
https://github.com/KoichiYasuoka/SuPar/tree/9bcf10fb946cae75b6a311d4cd19bec5bb1a9487
|
D_UpBlock
|
import torch
import torch.utils.data
from torchvision.transforms import *
class ConvBlock(torch.nn.Module):
def __init__(self, input_size, output_size, kernel_size=3, stride=1,
padding=1, bias=True, activation='prelu', norm=None):
super(ConvBlock, self).__init__()
self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size,
stride, padding, bias=bias)
self.norm = norm
if self.norm == 'batch':
self.bn = torch.nn.BatchNorm2d(output_size)
elif self.norm == 'instance':
self.bn = torch.nn.InstanceNorm2d(output_size)
self.activation = activation
if self.activation == 'relu':
self.act = torch.nn.ReLU(True)
elif self.activation == 'prelu':
self.act = torch.nn.PReLU()
elif self.activation == 'lrelu':
self.act = torch.nn.LeakyReLU(0.2, True)
elif self.activation == 'tanh':
self.act = torch.nn.Tanh()
elif self.activation == 'sigmoid':
self.act = torch.nn.Sigmoid()
def forward(self, x):
if self.norm is not None:
out = self.bn(self.conv(x))
else:
out = self.conv(x)
if self.activation is not None:
return self.act(out)
else:
return out
class DeconvBlock(torch.nn.Module):
def __init__(self, input_size, output_size, kernel_size=4, stride=2,
padding=1, bias=True, activation='prelu', norm=None):
super(DeconvBlock, self).__init__()
self.deconv = torch.nn.ConvTranspose2d(input_size, output_size,
kernel_size, stride, padding, bias=bias)
self.norm = norm
if self.norm == 'batch':
self.bn = torch.nn.BatchNorm2d(output_size)
elif self.norm == 'instance':
self.bn = torch.nn.InstanceNorm2d(output_size)
self.activation = activation
if self.activation == 'relu':
self.act = torch.nn.ReLU(True)
elif self.activation == 'prelu':
self.act = torch.nn.PReLU()
elif self.activation == 'lrelu':
self.act = torch.nn.LeakyReLU(0.2, True)
elif self.activation == 'tanh':
self.act = torch.nn.Tanh()
elif self.activation == 'sigmoid':
self.act = torch.nn.Sigmoid()
def forward(self, x):
if self.norm is not None:
out = self.bn(self.deconv(x))
else:
out = self.deconv(x)
if self.activation is not None:
return self.act(out)
else:
return out
class D_UpBlock(torch.nn.Module):
def __init__(self, num_filter, kernel_size=8, stride=4, padding=2,
num_stages=1, bias=True, activation='prelu', norm=None):
super(D_UpBlock, self).__init__()
self.conv = ConvBlock(num_filter * num_stages, num_filter, 1, 1, 0,
activation, norm=None)
self.up_conv1 = DeconvBlock(num_filter, num_filter, kernel_size,
stride, padding, activation, norm=None)
self.up_conv2 = ConvBlock(num_filter, num_filter, kernel_size,
stride, padding, activation, norm=None)
self.up_conv3 = DeconvBlock(num_filter, num_filter, kernel_size,
stride, padding, activation, norm=None)
def forward(self, x):
x = self.conv(x)
h0 = self.up_conv1(x)
l0 = self.up_conv2(h0)
h1 = self.up_conv3(l0 - x)
return h1 + h0
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_filter': 4}]
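# Hedged usage sketch (not part of the original iSeeBetter source): it runs the
# up-projection block on the tensor shape produced by get_inputs() above. Note that
# ConvBlock/DeconvBlock are called with `activation` in the positional `bias` slot, so each
# sub-block keeps the default 'prelu' activation.
if __name__ == '__main__':
    _block = D_UpBlock(num_filter=4)
    _lr = torch.rand([4, 4, 4, 4])
    _hr = _block(_lr)
    print(_hr.shape)  # torch.Size([4, 4, 16, 16]); spatial size grows by stride=4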
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
from torchvision.transforms import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_0(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp7 = tmp6 * tmp2
tmp8 = tl.where(tmp4, tmp2, tmp7)
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_1(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 4
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp7 = tmp6 * tmp2
tmp8 = tl.where(tmp4, tmp2, tmp7)
tl.store(in_out_ptr0 + x3, tmp2, None)
tl.store(out_ptr0 + x3, tmp8, None)
@triton.jit
def triton_poi_fused__prelu_kernel_convolution_sub_2(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp9 = tl.load(in_ptr2 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp7 = tmp6 * tmp2
tmp8 = tl.where(tmp4, tmp2, tmp7)
tmp10 = tmp8 - tmp9
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused__prelu_kernel_add_convolution_3(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 4
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp9 = tl.load(in_ptr2 + x3, None)
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp7 = tmp6 * tmp2
tmp8 = tl.where(tmp4, tmp2, tmp7)
tmp10 = tmp8 + tmp9
tl.store(in_out_ptr0 + x3, tmp2, None)
tl.store(out_ptr0 + x3, tmp10, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1,), (1,))
assert_size_stride(primals_5, (4, 4, 8, 8), (256, 64, 8, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (1,), (1,))
assert_size_stride(primals_8, (4, 4, 8, 8), (256, 64, 8, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (1,), (1,))
assert_size_stride(primals_11, (4, 4, 8, 8), (256, 64, 8, 1))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__prelu_kernel_convolution_0[grid(256)](buf1,
primals_2, primals_4, buf2, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_2
buf3 = extern_kernels.convolution(buf2, primals_5, stride=(4, 4),
padding=(2, 2), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 16, 16), (1024, 256, 16, 1))
buf4 = buf3
del buf3
buf5 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch
.float32)
triton_poi_fused__prelu_kernel_convolution_1[grid(4096)](buf4,
primals_6, primals_7, buf5, 4096, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_6
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(4, 4),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = buf6
del buf6
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__prelu_kernel_convolution_sub_2[grid(256)](buf7,
primals_9, primals_10, buf2, buf8, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_9
buf9 = extern_kernels.convolution(buf8, primals_11, stride=(4, 4),
padding=(2, 2), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 4, 16, 16), (1024, 256, 16, 1))
buf10 = buf9
del buf9
buf11 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1),
torch.float32)
triton_poi_fused__prelu_kernel_add_convolution_3[grid(4096)](buf10,
primals_12, primals_13, buf5, buf11, 4096, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_12
return (buf11, primals_1, primals_3, primals_4, primals_5, primals_7,
primals_8, primals_10, primals_11, primals_13, buf1, buf2, buf4,
buf5, buf7, buf8, buf10)
class ConvBlock(torch.nn.Module):
def __init__(self, input_size, output_size, kernel_size=3, stride=1,
padding=1, bias=True, activation='prelu', norm=None):
super(ConvBlock, self).__init__()
self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size,
stride, padding, bias=bias)
self.norm = norm
if self.norm == 'batch':
self.bn = torch.nn.BatchNorm2d(output_size)
elif self.norm == 'instance':
self.bn = torch.nn.InstanceNorm2d(output_size)
self.activation = activation
if self.activation == 'relu':
self.act = torch.nn.ReLU(True)
elif self.activation == 'prelu':
self.act = torch.nn.PReLU()
elif self.activation == 'lrelu':
self.act = torch.nn.LeakyReLU(0.2, True)
elif self.activation == 'tanh':
self.act = torch.nn.Tanh()
elif self.activation == 'sigmoid':
self.act = torch.nn.Sigmoid()
def forward(self, x):
if self.norm is not None:
out = self.bn(self.conv(x))
else:
out = self.conv(x)
if self.activation is not None:
return self.act(out)
else:
return out
class DeconvBlock(torch.nn.Module):
def __init__(self, input_size, output_size, kernel_size=4, stride=2,
padding=1, bias=True, activation='prelu', norm=None):
super(DeconvBlock, self).__init__()
self.deconv = torch.nn.ConvTranspose2d(input_size, output_size,
kernel_size, stride, padding, bias=bias)
self.norm = norm
if self.norm == 'batch':
self.bn = torch.nn.BatchNorm2d(output_size)
elif self.norm == 'instance':
self.bn = torch.nn.InstanceNorm2d(output_size)
self.activation = activation
if self.activation == 'relu':
self.act = torch.nn.ReLU(True)
elif self.activation == 'prelu':
self.act = torch.nn.PReLU()
elif self.activation == 'lrelu':
self.act = torch.nn.LeakyReLU(0.2, True)
elif self.activation == 'tanh':
self.act = torch.nn.Tanh()
elif self.activation == 'sigmoid':
self.act = torch.nn.Sigmoid()
def forward(self, x):
if self.norm is not None:
out = self.bn(self.deconv(x))
else:
out = self.deconv(x)
if self.activation is not None:
return self.act(out)
else:
return out
class D_UpBlockNew(torch.nn.Module):
def __init__(self, num_filter, kernel_size=8, stride=4, padding=2,
num_stages=1, bias=True, activation='prelu', norm=None):
super(D_UpBlockNew, self).__init__()
self.conv = ConvBlock(num_filter * num_stages, num_filter, 1, 1, 0,
activation, norm=None)
self.up_conv1 = DeconvBlock(num_filter, num_filter, kernel_size,
stride, padding, activation, norm=None)
self.up_conv2 = ConvBlock(num_filter, num_filter, kernel_size,
stride, padding, activation, norm=None)
self.up_conv3 = DeconvBlock(num_filter, num_filter, kernel_size,
stride, padding, activation, norm=None)
def forward(self, input_0):
primals_1 = self.conv.conv.weight
primals_2 = self.conv.conv.bias
primals_4 = self.conv.act.weight
primals_5 = self.up_conv1.deconv.weight
primals_6 = self.up_conv1.deconv.bias
primals_7 = self.up_conv1.act.weight
primals_8 = self.up_conv2.conv.weight
primals_9 = self.up_conv2.conv.bias
primals_10 = self.up_conv2.act.weight
primals_11 = self.up_conv3.deconv.weight
primals_12 = self.up_conv3.deconv.bias
primals_13 = self.up_conv3.act.weight
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
|
HamsterBiz/iSeeBetter
|
D_UpBlock
| false
| 11,640
|
[
"MIT"
] | 0
|
a71cee61583bdedab1f3b368e2cb7dc5ad969aed
|
https://github.com/HamsterBiz/iSeeBetter/tree/a71cee61583bdedab1f3b368e2cb7dc5ad969aed
|
LSTMClassCriterion
|
import torch
import torch.nn as nn
def to_contiguous(tensor):
if tensor.is_contiguous():
return tensor
else:
return tensor.contiguous()
class LSTMClassCriterion(nn.Module):
def __init__(self):
super(LSTMClassCriterion, self).__init__()
def forward(self, pred, target, mask):
pred = pred.clone()
target = target.clone()
mask = mask.clone()
target = target[:, :pred.size(1)]
mask = mask[:, :pred.size(1)]
pred = to_contiguous(pred).view(-1, pred.size(2))
target = to_contiguous(target).view(-1, 1)
mask = to_contiguous(mask).view(-1, 1)
loss = -pred.gather(1, target) * mask
loss = torch.sum(loss) / torch.sum(mask)
_, idx = torch.max(pred, dim=1)
correct = idx.eq(torch.squeeze(target))
correct = correct.float() * torch.squeeze(mask)
accuracy = torch.sum(correct) / torch.sum(mask)
return loss, accuracy
def get_inputs():
return [torch.ones([4, 4, 4], dtype=torch.int64), torch.ones([4, 4],
dtype=torch.int64), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
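# Hedged usage sketch (not part of the original shape2prog source): `pred` is expected to
# hold log-probabilities, so log_softmax outputs are used here instead of the integer dummy
# tensors returned by get_inputs() above (an assumption made for a meaningful example).
if __name__ == '__main__':
    _crit = LSTMClassCriterion()
    _pred = torch.log_softmax(torch.randn(4, 4, 4), dim=-1)  # (B, T, V)
    _target = torch.randint(0, 4, (4, 4))                    # (B, T), int64 class ids
    _mask = torch.ones(4, 4)                                  # (B, T)
    _loss, _acc = _crit(_pred, _target, _mask)
    print(float(_loss), float(_acc))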
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__to_copy_div_eq_gather_max_mul_neg_sum_0(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.
constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr1 + r0, None)
tmp39 = tl.load(in_ptr2 + r0, None)
tmp2 = tmp0 > tmp1
tmp3 = tmp0 == tmp1
tmp4 = tl.full([1, 1], 0, tl.int64)
tmp5 = tl.full([1, 1], 1, tl.int64)
tmp6 = tmp4 < tmp5
tmp7 = tmp3 & tmp6
tmp8 = tmp2 | tmp7
tmp9 = tl.where(tmp8, tmp0, tmp1)
tmp10 = tl.where(tmp8, tmp4, tmp5)
tmp12 = tmp9 > tmp11
tmp13 = tmp9 == tmp11
tmp14 = tl.full([1, 1], 2, tl.int64)
tmp15 = tmp10 < tmp14
tmp16 = tmp13 & tmp15
tmp17 = tmp12 | tmp16
tmp18 = tl.where(tmp17, tmp9, tmp11)
tmp19 = tl.where(tmp17, tmp10, tmp14)
tmp21 = tmp18 > tmp20
tmp22 = tmp18 == tmp20
tmp23 = tl.full([1, 1], 3, tl.int64)
tmp24 = tmp19 < tmp23
tmp25 = tmp22 & tmp24
tmp26 = tmp21 | tmp25
tl.where(tmp26, tmp18, tmp20)
tmp28 = tl.where(tmp26, tmp19, tmp23)
tmp30 = tmp28 == tmp29
tmp31 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp32 = tmp29 + tmp31
tmp33 = tmp29 < 0
tmp34 = tl.where(tmp33, tmp32, tmp29)
tl.device_assert((0 <= tmp34) & (tmp34 < 4),
'index out of bounds: 0 <= tmp34 < 4')
tmp36 = tl.load(in_ptr0 + (tmp34 + 4 * r0), None, eviction_policy=
'evict_last')
tmp37 = -tmp36
tmp38 = tmp37.to(tl.float32)
tmp40 = tmp38 * tmp39
tmp41 = tl.broadcast_to(tmp40, [XBLOCK, RBLOCK])
tmp43 = tl.sum(tmp41, 1)[:, None]
tmp44 = tl.broadcast_to(tmp39, [XBLOCK, RBLOCK])
tmp46 = tl.sum(tmp44, 1)[:, None]
tmp47 = tmp30.to(tl.float32)
tmp48 = tmp47 * tmp39
tmp49 = tl.broadcast_to(tmp48, [XBLOCK, RBLOCK])
tmp51 = tl.sum(tmp49, 1)[:, None]
tmp52 = tmp51 / tmp46
tmp53 = tmp43 / tmp46
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp52, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp53, None)
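# Editor's note (not part of the generated kernel): a single program reduces all 16
# (row, step) pairs at once. It computes a running argmax over the 4 classes (tmp28),
# gathers -pred[target] for the masked NLL numerator, and accumulates sum(mask) once so
# that loss (tmp53) and accuracy (tmp52) share the same denominator.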
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = empty_strided_cuda((), (), torch.float32)
buf6 = buf3
del buf3
buf5 = buf0
del buf0
get_raw_stream(0)
triton_per_fused__to_copy_div_eq_gather_max_mul_neg_sum_0[grid(1)](buf6
, buf5, arg0_1, arg1_1, arg2_1, 1, 16, XBLOCK=1, num_warps=2,
num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf5, buf6
def to_contiguous(tensor):
if tensor.is_contiguous():
return tensor
else:
return tensor.contiguous()
class LSTMClassCriterionNew(nn.Module):
def __init__(self):
super(LSTMClassCriterionNew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1]
|
LeoZDong/shape2prog
|
LSTMClassCriterion
| false
| 11,641
|
[
"BSD-2-Clause"
] | 0
|
2185d1d4eb7a1c4c55e644c6af477fd8e8e70241
|
https://github.com/LeoZDong/shape2prog/tree/2185d1d4eb7a1c4c55e644c6af477fd8e8e70241
|
LSTMRegressCriterion
|
import torch
import torch.nn as nn
class LSTMRegressCriterion(nn.Module):
def __init__(self):
super(LSTMRegressCriterion, self).__init__()
def forward(self, pred, target, mask):
pred = pred.clone()
target = target.clone()
mask = mask.clone()
target = target[:, :pred.size(1), :]
mask = mask[:, :pred.size(1), :]
diff = 0.5 * (pred - target) ** 2
diff = diff * mask
output = torch.sum(diff) / torch.sum(mask)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
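# Hedged usage sketch (not part of the original shape2prog source): masked half-squared
# error averaged over the mask, matching forward() above.
if __name__ == '__main__':
    _crit = LSTMRegressCriterion()
    _pred = torch.rand(4, 4, 4, 4)
    _target = torch.rand(4, 4, 4, 4)
    _mask = torch.ones(4, 4, 4, 4)
    _out = _crit(_pred, _target, _mask)
    # With an all-ones mask this equals 0.5 * mean((pred - target) ** 2).
    print(float(_out))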
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp6 = tl.load(in_ptr2 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = 0.5
tmp5 = tmp3 * tmp4
tmp7 = tmp5 * tmp6
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = tl.broadcast_to(tmp6, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = tmp10 / tmp13
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp14, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_mul_pow_sub_sum_0[grid(1)](buf2, arg0_1,
arg1_1, arg2_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf2,
class LSTMRegressCriterionNew(nn.Module):
def __init__(self):
super(LSTMRegressCriterionNew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
LeoZDong/shape2prog
|
LSTMRegressCriterion
| false
| 11,642
|
[
"BSD-2-Clause"
] | 0
|
2185d1d4eb7a1c4c55e644c6af477fd8e8e70241
|
https://github.com/LeoZDong/shape2prog/tree/2185d1d4eb7a1c4c55e644c6af477fd8e8e70241
|
ViTStemPatchify
|
from torch.nn import Module
import torch
import torch.utils.data
import torch.nn as nn
def patchify2d(w_in, w_out, k, *, bias=True):
"""Helper for building a patchify layer as used by ViT models."""
return nn.Conv2d(w_in, w_out, k, stride=k, padding=0, bias=bias)
def patchify2d_cx(cx, w_in, w_out, k, *, bias=True):
"""Accumulates complexity of patchify2d into cx = (h, w, flops, params, acts)."""
    err_str = 'Only input sizes divisible by the kernel size are supported.'
assert cx['h'] % k == 0 and cx['w'] % k == 0, err_str
h, w, flops, params, acts = cx['h'], cx['w'], cx['flops'], cx['params'
], cx['acts']
h, w = h // k, w // k
flops += k * k * w_in * w_out * h * w + (w_out * h * w if bias else 0)
params += k * k * w_in * w_out + (w_out if bias else 0)
acts += w_out * h * w
return {'h': h, 'w': w, 'flops': flops, 'params': params, 'acts': acts}
class ViTStemPatchify(Module):
"""The patchify vision transformer stem as per https://arxiv.org/abs/2010.11929."""
def __init__(self, w_in, w_out, k):
super(ViTStemPatchify, self).__init__()
self.patchify = patchify2d(w_in, w_out, k, bias=True)
def forward(self, x):
return self.patchify(x)
@staticmethod
def complexity(cx, w_in, w_out, k):
return patchify2d_cx(cx, w_in, w_out, k, bias=True)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'w_in': 4, 'w_out': 4, 'k': 4}]
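# Hypothetical usage sketch (not part of the original repo): with k equal to the
# input resolution, the stem produces a single patch token per image.
def example_vit_stem_patchify():
    stem = ViTStemPatchify(w_in=4, w_out=4, k=4)
    y = stem(get_inputs()[0])  # (4, 4, 1, 1): one 4x4 patch per image
    cx = {'h': 4, 'w': 4, 'flops': 0, 'params': 0, 'acts': 0}
    cx = ViTStemPatchify.complexity(cx, w_in=4, w_out=4, k=4)
    return y, cx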
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4,
4), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16)](buf1, primals_2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf1, primals_1, primals_3
def patchify2d(w_in, w_out, k, *, bias=True):
"""Helper for building a patchify layer as used by ViT models."""
return nn.Conv2d(w_in, w_out, k, stride=k, padding=0, bias=bias)
def patchify2d_cx(cx, w_in, w_out, k, *, bias=True):
"""Accumulates complexity of patchify2d into cx = (h, w, flops, params, acts)."""
    err_str = 'Only input sizes divisible by the kernel size are supported.'
assert cx['h'] % k == 0 and cx['w'] % k == 0, err_str
h, w, flops, params, acts = cx['h'], cx['w'], cx['flops'], cx['params'
], cx['acts']
h, w = h // k, w // k
flops += k * k * w_in * w_out * h * w + (w_out * h * w if bias else 0)
params += k * k * w_in * w_out + (w_out if bias else 0)
acts += w_out * h * w
return {'h': h, 'w': w, 'flops': flops, 'params': params, 'acts': acts}
class ViTStemPatchifyNew(Module):
"""The patchify vision transformer stem as per https://arxiv.org/abs/2010.11929."""
def __init__(self, w_in, w_out, k):
super(ViTStemPatchifyNew, self).__init__()
self.patchify = patchify2d(w_in, w_out, k, bias=True)
@staticmethod
def complexity(cx, w_in, w_out, k):
return patchify2d_cx(cx, w_in, w_out, k, bias=True)
def forward(self, input_0):
primals_1 = self.patchify.weight
primals_2 = self.patchify.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
LicharYuan/pycls
|
ViTStemPatchify
| false
| 11,643
|
[
"MIT"
] | 0
|
633529425f2c9ffadd892c1a0418b37891ee2d44
|
https://github.com/LicharYuan/pycls/tree/633529425f2c9ffadd892c1a0418b37891ee2d44
|
RegressionModel
|
import torch
import torch.nn as nn
class RegressionModel(nn.Module):
def __init__(self, num_features_in, num_anchors=9, feature_size=256):
super(RegressionModel, self).__init__()
self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3,
padding=1)
self.act1 = nn.ReLU()
self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act2 = nn.ReLU()
self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act3 = nn.ReLU()
self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act4 = nn.ReLU()
self.output = nn.Conv2d(feature_size, num_anchors * 4, kernel_size=
3, padding=1)
def forward(self, x):
out = self.conv1(x)
out = self.act1(out)
out = self.conv2(out)
out = self.act2(out)
out = self.conv3(out)
out = self.act3(out)
out = self.conv4(out)
out = self.act4(out)
out = self.output(out)
out = out.permute(0, 2, 3, 1)
return out.contiguous().view(out.shape[0], -1, 4)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_features_in': 4}]
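# Hypothetical usage sketch (not part of the original repo): the head flattens its
# conv output into (N, H * W * num_anchors, 4) box regression offsets.
def example_regression_model():
    model = RegressionModel(num_features_in=4)
    out = model(get_inputs()[0])  # (4, 4 * 4 * 9, 4) == (4, 144, 4)
    return out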
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_clone_view_1(in_out_ptr0, in_ptr0, in_ptr1, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 576 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.debug_barrier()
tl.store(in_out_ptr0 + (x2 + 36 * y3), tmp2, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (256, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (36, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_11, (36,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 256, 4, 4), (4096, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(16384)](buf1, primals_2,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 256, 4, 4), (4096, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_0[grid(16384)](buf3, primals_5,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 256, 4, 4), (4096, 16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_0[grid(16384)](buf5, primals_7,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 4, 4), (4096, 16, 4, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_0[grid(16384)](buf7, primals_9,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 36, 4, 4), (576, 16, 4, 1))
buf9 = empty_strided_cuda((4, 4, 4, 36), (576, 144, 36, 1), torch.
float32)
buf10 = reinterpret_tensor(buf9, (4, 144, 4), (576, 4, 1), 0)
del buf9
triton_poi_fused_clone_view_1[grid(64, 36)](buf10, buf8, primals_11,
64, 36, XBLOCK=64, YBLOCK=4, num_warps=4, num_stages=1)
del buf8
del primals_11
return (buf10, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, buf1, buf3, buf5, buf7)
class RegressionModelNew(nn.Module):
def __init__(self, num_features_in, num_anchors=9, feature_size=256):
super(RegressionModelNew, self).__init__()
self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3,
padding=1)
self.act1 = nn.ReLU()
self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act2 = nn.ReLU()
self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act3 = nn.ReLU()
self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3,
padding=1)
self.act4 = nn.ReLU()
self.output = nn.Conv2d(feature_size, num_anchors * 4, kernel_size=
3, padding=1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.output.weight
primals_11 = self.output.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
Hyojin021/auto_labeling
|
RegressionModel
| false
| 11,644
|
[
"Apache-2.0"
] | 0
|
1ccf0cd1c5adf34692751553a988aa0fcf4efefb
|
https://github.com/Hyojin021/auto_labeling/tree/1ccf0cd1c5adf34692751553a988aa0fcf4efefb
|
NormedLinear
|
import torch
import torch.nn.functional as F
from torch import nn
class NormedLinear(nn.Linear):
"""Normalized Linear Layer.
Args:
        tempearture (float, optional): Temperature term. Default to 20.
        power (float, optional): Power term. Default to 1.0.
eps (float, optional): The minimal value of divisor to
keep numerical stability. Default to 1e-6.
"""
def __init__(self, *args, tempearture=20, power=1.0, eps=1e-06, **kwargs):
super(NormedLinear, self).__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.eps = eps
self.init_weights()
def init_weights(self):
nn.init.normal_(self.weight, mean=0, std=0.01)
if self.bias is not None:
nn.init.constant_(self.bias, 0)
def forward(self, x):
weight_ = self.weight / (self.weight.norm(dim=1, keepdim=True).pow(
self.power) + self.eps)
x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x_ * self.tempearture
return F.linear(x_, weight_, self.bias)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
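# Hypothetical usage sketch (not part of the original repo): the weight and input
# are norm-scaled over dim=1 and the input is multiplied by the temperature before
# the usual linear projection.
def example_normed_linear():
    layer = NormedLinear(in_features=4, out_features=4)
    out = layer(get_inputs()[0])  # (4, 4, 4, 4), last dim = out_features
    return out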
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_mul_pow_0(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-06
tmp14 = tmp12 + tmp13
tmp15 = tmp0 / tmp14
tmp16 = 20.0
tmp17 = tmp15 * tmp16
tl.store(out_ptr0 + x3, tmp17, xmask)
@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_pow_1(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-06
tmp14 = tmp12 + tmp13
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_linalg_vector_norm_mul_pow_0[grid(256)](
primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_div_linalg_vector_norm_pow_1[grid(16)](primals_1,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (64, 4), (
4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), alpha=1,
beta=1, out=buf2)
del buf1
del primals_3
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_1, reinterpret_tensor(buf0, (64, 4), (4, 1), 0)
class NormedLinearNew(nn.Linear):
"""Normalized Linear Layer.
Args:
        tempearture (float, optional): Temperature term. Default to 20.
        power (float, optional): Power term. Default to 1.0.
eps (float, optional): The minimal value of divisor to
keep numerical stability. Default to 1e-6.
"""
def __init__(self, *args, tempearture=20, power=1.0, eps=1e-06, **kwargs):
super(NormedLinearNew, self).__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.eps = eps
self.init_weights()
def init_weights(self):
nn.init.normal_(self.weight, mean=0, std=0.01)
if self.bias is not None:
nn.init.constant_(self.bias, 0)
def forward(self, input_0):
primals_1 = self.weight
primals_3 = self.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
LiuXiaoxuanPKU/mmdetection
|
NormedLinear
| false
| 11,645
|
[
"Apache-2.0"
] | 0
|
05b46eccbe5c4953d5a406f545fe529ce4e146d3
|
https://github.com/LiuXiaoxuanPKU/mmdetection/tree/05b46eccbe5c4953d5a406f545fe529ce4e146d3
|
MergeGate
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class MergeGate(nn.Module):
def __init__(self, hidden_size):
super(MergeGate, self).__init__()
self.hidden_size = hidden_size
self.WSh = nn.Linear(hidden_size, hidden_size)
self.WSc = nn.Linear(hidden_size, hidden_size)
self.WSr = nn.Linear(hidden_size, hidden_size)
self.wS = nn.Linear(hidden_size, 1)
def forward(self, attn_applied_c, attn_applied_r, hidden):
content_c = self.WSc(attn_applied_c) + self.WSh(hidden.transpose(0, 1))
score_c = self.wS(F.tanh(content_c))
content_r = self.WSr(attn_applied_r) + self.WSh(hidden.transpose(0, 1))
score_r = self.wS(F.tanh(content_r))
gama_t = F.sigmoid(score_c - score_r)
c_t = gama_t * attn_applied_c + (1 - gama_t) * attn_applied_r
return c_t
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
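# Hypothetical usage sketch (not part of the original repo): gama_t in [0, 1]
# interpolates between the two attention contexts, so c_t keeps their shape.
def example_merge_gate():
    gate = MergeGate(hidden_size=4)
    attn_c, attn_r, hidden = get_inputs()
    return gate(attn_c, attn_r, hidden)  # same shape as attn_c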
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16 % 4
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2 + 64 * x1), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_tanh_1(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1,
in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_out_ptr1 + x2, xmask)
tmp9 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = libdevice.tanh(tmp6)
tmp10 = tmp8 + tmp9
tmp11 = tmp10 + tmp5
tmp12 = libdevice.tanh(tmp11)
tl.store(in_out_ptr0 + x2, tmp7, xmask)
tl.store(in_out_ptr1 + x2, tmp12, xmask)
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_sub_2(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + x2, xmask)
tmp12 = tl.load(in_ptr4 + x2, xmask)
tmp3 = tmp0 + tmp2
tmp5 = tmp4 + tmp2
tmp6 = tmp3 - tmp5
tmp7 = tl.sigmoid(tmp6)
tmp9 = tmp7 * tmp8
tmp10 = 1.0
tmp11 = tmp10 - tmp7
tmp13 = tmp11 * tmp12
tmp14 = tmp9 + tmp13
tl.store(out_ptr0 + x2, tmp14, xmask)
@triton.jit
def triton_poi_fused_rsub_sigmoid_sigmoid_backward_sub_3(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr1 + x0, xmask)
tmp3 = tmp0 + tmp2
tmp5 = tmp4 + tmp2
tmp6 = tmp3 - tmp5
tmp7 = tl.sigmoid(tmp6)
tmp8 = 1.0
tmp9 = tmp8 - tmp7
tmp10 = tmp7 * tmp9
tl.store(in_out_ptr0 + x0, tmp10, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (1, 4), (4, 1))
assert_size_stride(primals_8, (1,), (1,))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](primals_4, buf1, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2)
del primals_5
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_11, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf5)
del primals_9
buf3 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_add_tanh_1[grid(256)](buf3, buf6, primals_2, buf2,
primals_6, primals_10, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_10
del primals_2
del primals_6
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 1), (1, 4), 0), out=buf4)
buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 1), (1, 4), 0), out=buf7)
buf8 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_add_mul_rsub_sigmoid_sub_2[grid(256)](buf4,
primals_8, buf7, primals_3, primals_11, buf8, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf4
triton_poi_fused_rsub_sigmoid_sigmoid_backward_sub_3[grid(64)](buf9,
primals_8, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf7
del primals_8
return buf8, primals_3, primals_11, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), buf3, buf6, buf9, primals_7
class MergeGateNew(nn.Module):
def __init__(self, hidden_size):
super(MergeGateNew, self).__init__()
self.hidden_size = hidden_size
self.WSh = nn.Linear(hidden_size, hidden_size)
self.WSc = nn.Linear(hidden_size, hidden_size)
self.WSr = nn.Linear(hidden_size, hidden_size)
self.wS = nn.Linear(hidden_size, 1)
def forward(self, input_0, input_1, input_2):
primals_1 = self.WSh.weight
primals_2 = self.WSh.bias
primals_5 = self.WSc.weight
primals_6 = self.WSc.bias
primals_9 = self.WSr.weight
primals_10 = self.WSr.bias
primals_7 = self.wS.weight
primals_8 = self.wS.bias
primals_3 = input_0
primals_4 = input_1
primals_11 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
LeiShenVictoria/Static-Dynamic-Attention-CNNRNN
|
MergeGate
| false
| 11,646
|
[
"MIT"
] | 0
|
e2823717d22c9e543428d471ff19113bbb59ebfe
|
https://github.com/LeiShenVictoria/Static-Dynamic-Attention-CNNRNN/tree/e2823717d22c9e543428d471ff19113bbb59ebfe
|
Actor
|
import torch
import torch.nn as nn
import torch as t
class Actor(nn.Module):
def __init__(self, state_dim, action_dim, action_range):
super().__init__()
self.fc1 = nn.Linear(state_dim, 16)
self.fc2 = nn.Linear(16, 16)
self.fc3 = nn.Linear(16, action_dim)
self.action_range = action_range
def forward(self, state):
a = t.relu(self.fc1(state))
a = t.relu(self.fc2(a))
a = t.tanh(self.fc3(a)) * self.action_range
return a
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'action_dim': 4, 'action_range': 4}]
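# Hypothetical usage sketch (not part of the original repo): tanh squashes the
# final layer, so actions lie in [-action_range, action_range].
def example_actor():
    actor = Actor(state_dim=4, action_dim=4, action_range=4)
    return actor(get_inputs()[0])  # values in [-4, 4]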
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_mul_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tmp2 = 4.0
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (16, 4), (4, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (16, 16), (16, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (4, 16), (16, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 16), (256, 64, 16, 1), 0)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(1024)](buf1,
primals_2, buf7, 1024, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 16), (16, 1), 0),
reinterpret_tensor(primals_4, (16, 16), (1, 16), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 16), (256, 64, 16, 1), 0)
del buf2
buf6 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(1024)](buf3,
primals_5, buf6, 1024, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 16),
(16, 1), 0), reinterpret_tensor(primals_6, (16, 4), (1, 16), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_tanh_1[grid(256)](buf4, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 16), (16, 1), 0), reinterpret_tensor(
buf3, (64, 16), (16, 1), 0), buf4, primals_6, buf6, primals_4, buf7
class ActorNew(nn.Module):
def __init__(self, state_dim, action_dim, action_range):
super().__init__()
self.fc1 = nn.Linear(state_dim, 16)
self.fc2 = nn.Linear(16, 16)
self.fc3 = nn.Linear(16, action_dim)
self.action_range = action_range
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
LeonLester/Machin-title-in-progress-
|
Actor
| false
| 11,647
|
[
"MIT"
] | 0
|
777479d47b520dcdc6b09c247591b5fe1d6cbe8c
|
https://github.com/LeonLester/Machin-title-in-progress-/tree/777479d47b520dcdc6b09c247591b5fe1d6cbe8c
|
Critic
|
import torch
import torch.nn as nn
import torch as t
class Critic(nn.Module):
def __init__(self, state_dim, action_dim):
super().__init__()
self.fc1 = nn.Linear(state_dim + action_dim, 16)
self.fc2 = nn.Linear(16, 16)
self.fc3 = nn.Linear(16, 1)
def forward(self, state, action):
state_action = t.cat([state, action], 1)
q = t.relu(self.fc1(state_action))
q = t.relu(self.fc2(q))
q = self.fc3(q)
return q
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'action_dim': 4}]
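# Hypothetical usage sketch (not part of the original repo): state and action are
# concatenated along dim=1, so the critic returns one Q-value per batch row.
def example_critic():
    critic = Critic(state_dim=4, action_dim=4)
    state, action = get_inputs()
    return critic(state, action)  # shape (4, 1)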
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (16, 8), (8, 1))
assert_size_stride(primals_4, (16,), (1,))
assert_size_stride(primals_5, (16, 16), (16, 1))
assert_size_stride(primals_6, (16,), (1,))
assert_size_stride(primals_7, (1, 16), (16, 1))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 16), (1,
8), 0), out=buf1)
del primals_3
buf2 = buf1
del buf1
triton_poi_fused_relu_1[grid(64)](buf2, primals_4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (16, 16), (1,
16), 0), out=buf3)
buf4 = buf3
del buf3
triton_poi_fused_relu_1[grid(64)](buf4, primals_6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_6
buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7,
(16, 1), (1, 16), 0), alpha=1, beta=1, out=buf6)
del primals_8
return buf6, buf0, buf2, buf4, primals_7, primals_5
class CriticNew(nn.Module):
def __init__(self, state_dim, action_dim):
super().__init__()
self.fc1 = nn.Linear(state_dim + action_dim, 16)
self.fc2 = nn.Linear(16, 16)
self.fc3 = nn.Linear(16, 1)
def forward(self, input_0, input_1):
primals_3 = self.fc1.weight
primals_4 = self.fc1.bias
primals_5 = self.fc2.weight
primals_6 = self.fc2.bias
primals_7 = self.fc3.weight
primals_8 = self.fc3.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
|
LeonLester/Machin-title-in-progress-
|
Critic
| false
| 11,648
|
[
"MIT"
] | 0
|
777479d47b520dcdc6b09c247591b5fe1d6cbe8c
|
https://github.com/LeonLester/Machin-title-in-progress-/tree/777479d47b520dcdc6b09c247591b5fe1d6cbe8c
|
NormedConv2d
|
import torch
from torch import nn
class NormedConv2d(nn.Conv2d):
"""Normalized Conv2d Layer.
Args:
        tempearture (float, optional): Temperature term. Default to 20.
        power (float, optional): Power term. Default to 1.0.
eps (float, optional): The minimal value of divisor to
keep numerical stability. Default to 1e-6.
norm_over_kernel (bool, optional): Normalize over kernel.
Default to False.
"""
def __init__(self, *args, tempearture=20, power=1.0, eps=1e-06,
norm_over_kernel=False, **kwargs):
super(NormedConv2d, self).__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.norm_over_kernel = norm_over_kernel
self.eps = eps
def forward(self, x):
if not self.norm_over_kernel:
weight_ = self.weight / (self.weight.norm(dim=1, keepdim=True).
pow(self.power) + self.eps)
else:
weight_ = self.weight / (self.weight.view(self.weight.size(0),
-1).norm(dim=1, keepdim=True).pow(self.power)[..., None,
None] + self.eps)
x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x_ * self.tempearture
if hasattr(self, 'conv2d_forward'):
x_ = self.conv2d_forward(x_, weight_)
elif torch.__version__ >= '1.8':
x_ = self._conv_forward(x_, weight_, self.bias)
else:
x_ = self._conv_forward(x_, weight_)
return x_
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
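# Hypothetical usage sketch (not part of the original repo): with the default
# norm_over_kernel=False, both weight and input are normalized over dim=1 and the
# input is scaled by the temperature before the convolution.
def example_normed_conv2d():
    conv = NormedConv2d(in_channels=4, out_channels=4, kernel_size=4)
    out = conv(get_inputs()[0])  # (4, 4, 1, 1) for a 4x4 input with a 4x4 kernel
    return out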
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_pow_0(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-06
tmp14 = tmp12 + tmp13
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x3, tmp15, xmask)
@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_mul_pow_1(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-06
tmp14 = tmp12 + tmp13
tmp15 = tmp0 / tmp14
tmp16 = 20.0
tmp17 = tmp15 * tmp16
tl.store(out_ptr0 + x3, tmp17, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_linalg_vector_norm_pow_0[grid(256)](primals_1,
buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_linalg_vector_norm_mul_pow_1[grid(256)](
primals_2, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_2[grid(16)](buf3, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
return buf3, primals_1, buf0, buf1
class NormedConv2dNew(nn.Conv2d):
"""Normalized Conv2d Layer.
Args:
        tempearture (float, optional): Temperature term. Default to 20.
        power (float, optional): Power term. Default to 1.0.
eps (float, optional): The minimal value of divisor to
keep numerical stability. Default to 1e-6.
norm_over_kernel (bool, optional): Normalize over kernel.
Default to False.
"""
def __init__(self, *args, tempearture=20, power=1.0, eps=1e-06,
norm_over_kernel=False, **kwargs):
super(NormedConv2dNew, self).__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.norm_over_kernel = norm_over_kernel
self.eps = eps
def forward(self, input_0):
primals_1 = self.weight
primals_3 = self.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
LiuXiaoxuanPKU/mmdetection
|
NormedConv2d
| false
| 11,649
|
[
"Apache-2.0"
] | 0
|
05b46eccbe5c4953d5a406f545fe529ce4e146d3
|
https://github.com/LiuXiaoxuanPKU/mmdetection/tree/05b46eccbe5c4953d5a406f545fe529ce4e146d3
|
conv_head_pooling
|
import torch
import torch.nn as nn
import torch.autograd
class conv_head_pooling(nn.Module):
def __init__(self, in_feature, out_feature, stride, padding_mode='zeros'):
super(conv_head_pooling, self).__init__()
self.maxpool = nn.MaxPool2d(3, 2, 1)
self.avgpool = nn.AvgPool2d(3, 2, 1)
self.conv1 = nn.Conv2d(in_feature, out_feature, 1, 1)
self.conv2 = nn.Conv2d(in_feature, out_feature, 1, 1)
self.conv3 = nn.Conv2d(2 * out_feature, out_feature, 1, 1)
self.fc = nn.Linear(in_feature, out_feature)
def forward(self, x, cls_token):
max = self.maxpool(x)
min = -self.maxpool(-x)
avg = self.avgpool(x)
avg = avg - max - min
max_2 = self.conv1(max)
avg_2 = self.conv2(max)
x = torch.cat([avg_2, max_2], dim=1)
x = self.conv3(x)
x = x + max_2
cls_token = self.fc(cls_token)
return x, cls_token
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_feature': 4, 'out_feature': 4, 'stride': 1}]
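# Hypothetical usage sketch (not part of the original repo): the spatial map is
# downsampled by the stride-2 poolings and 1x1 convs, while the class token only
# passes through the linear layer.
def example_conv_head_pooling():
    pool = conv_head_pooling(in_feature=4, out_feature=4, stride=1)
    x, cls_token = get_inputs()
    x_out, cls_out = pool(x, cls_token)  # x_out: (4, 4, 2, 2), cls_out: (4, 4, 4, 4)
    return x_out, cls_out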
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 2 % 2
x0 = xindex % 2
x3 = xindex // 2
x4 = xindex
tmp0 = -1 + 2 * x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + 2 * x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x3), tmp10 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp12 = 2 * x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x3), tmp16 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + 2 * x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-3 + 2 * x0 + 8 * x3), tmp23 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2 * x1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x3), tmp30 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (2 * x0 + 8 * x3), tmp33 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x3), tmp36 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + 2 * x1
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3 + 2 * x0 + 8 * x3), tmp43 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x3), tmp46 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x3), tmp49 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tl.store(out_ptr0 + x4, tmp51, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 8
x0 = xindex % 4
x2 = xindex // 32
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp13 = tl.load(in_ptr2 + (x0 + 4 * (-4 + x1) + 16 * x2), tmp10 & xmask,
other=0.0)
tmp14 = tl.load(in_ptr3 + (-4 + x1), tmp10 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp10, tmp15, tmp16)
tmp18 = tl.where(tmp4, tmp9, tmp17)
tl.store(out_ptr0 + x3, tmp18, xmask)
@triton.jit
def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr0 + x3, xmask)
tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x3, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 8, 1, 1), (8, 1, 1, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0[grid(64)](primals_1,
buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1))
buf2 = extern_kernels.convolution(buf0, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 2, 2), (16, 4, 2, 1))
buf3 = empty_strided_cuda((4, 8, 2, 2), (32, 4, 2, 1), torch.float32)
triton_poi_fused_cat_1[grid(128)](buf2, primals_5, buf1, primals_3,
buf3, 128, XBLOCK=128, num_warps=4, num_stages=1)
del buf2
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 2, 2), (16, 4, 2, 1))
buf5 = buf1
del buf1
triton_poi_fused_add_convolution_2[grid(64)](buf5, buf4, primals_7,
primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf4
del primals_3
del primals_7
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(primals_10, (64,
4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf6)
del primals_8
del primals_9
return buf5, reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_2, primals_4, primals_6, buf0, buf3, reinterpret_tensor(
primals_10, (64, 4), (4, 1), 0)
class conv_head_poolingNew(nn.Module):
def __init__(self, in_feature, out_feature, stride, padding_mode='zeros'):
super(conv_head_poolingNew, self).__init__()
self.maxpool = nn.MaxPool2d(3, 2, 1)
self.avgpool = nn.AvgPool2d(3, 2, 1)
self.conv1 = nn.Conv2d(in_feature, out_feature, 1, 1)
self.conv2 = nn.Conv2d(in_feature, out_feature, 1, 1)
self.conv3 = nn.Conv2d(2 * out_feature, out_feature, 1, 1)
self.fc = nn.Linear(in_feature, out_feature)
def forward(self, input_0, input_1):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.fc.weight
primals_9 = self.fc.bias
primals_1 = input_0
primals_10 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0], output[1]
|
LeiZhang1998/TransReID
|
conv_head_pooling
| false
| 11,650
|
[
"MIT"
] | 0
|
5a3f140633e3418c7cff2603ff2e814b9ab466ac
|
https://github.com/LeiZhang1998/TransReID/tree/5a3f140633e3418c7cff2603ff2e814b9ab466ac
|