repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/autocast_test_lists.py | import torch
class AutocastCPUTestLists(object):
    """Op/argument lists consumed by the test_autocast_* tests in test/test_cpu.py.

    Entries are ``(op_name, args)`` or ``(op_name, args, kwargs)``; the
    builtin-promote lists use ``(op_name, args, expected_output_dtype)``.
    List names map one-to-one onto test_autocast_<list_name> test cases.
    """

    # Supplies ops and arguments for test_autocast_* in test/test_cpu.py
    def __init__(self, dev):
        super().__init__()
        n = 8
        # Utility arguments, created as one-element tuples so they can be
        # concatenated with `+` to build positional-argument tuples below.
        pointwise0_bf16 = (torch.randn(n, dtype=torch.bfloat16, device=dev),)
        pointwise1_bf16 = (torch.randn(n, dtype=torch.bfloat16, device=dev),)
        pointwise2_bf16 = (torch.randn(n, dtype=torch.bfloat16, device=dev),)
        mat0_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),)
        mat1_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),)
        mat2_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),)
        pointwise0_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),)
        pointwise1_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),)
        pointwise2_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),)
        mat0_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),)
        mat1_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),)
        mat2_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),)
        mat3_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),)
        # Shapes of rank 1..5; dummy_*[i] is a tensor of rank i+1.
        dummy_dimsets = ((n,), (n, n), (n, n, n), (n, n, n, n), (n, n, n, n, n))
        dummy_bf16 = [
            (torch.randn(dimset, dtype=torch.bfloat16, device=dev),)
            for dimset in dummy_dimsets
        ]
        dummy_fp16 = [
            (torch.randn(dimset, dtype=torch.float16, device=dev),)
            for dimset in dummy_dimsets
        ]
        # (input, weight) pairs for conv1d/2d/3d respectively.
        dimsets = ((n, n, n), (n, n, n, n), (n, n, n, n, n))
        conv_args_bf16 = [
            (
                torch.randn(dimset, dtype=torch.bfloat16, device=dev),
                torch.randn(dimset, dtype=torch.bfloat16, device=dev),
            )
            for dimset in dimsets
        ]
        conv_args_fp16 = [
            (
                torch.randn(dimset, dtype=torch.float16, device=dev),
                torch.randn(dimset, dtype=torch.float16, device=dev),
            )
            for dimset in dimsets
        ]
        conv_args_fp32 = [
            (
                torch.randn(dimset, dtype=torch.float32, device=dev),
                torch.randn(dimset, dtype=torch.float32, device=dev),
            )
            for dimset in dimsets
        ]
        bias_fp32 = (torch.randn((n,), dtype=torch.float32, device=dev),)
        bias_fp16 = (torch.randn((n,), dtype=torch.float16, device=dev),)
        element0_fp32 = (torch.randn(1, dtype=torch.float32, device=dev),)
        pointwise0_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),)
        pointwise1_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),)
        mat0_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
        mat1_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
        mat2_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
        mat3_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
        dummy_fp32 = [
            (torch.randn(dimset, dtype=torch.float32, device=dev),)
            for dimset in dummy_dimsets
        ]
        # The lists below organize ops that autocast needs to test.
        # self.list_name corresponds to test_autocast_list_name in test/test_cpu.py.
        # Each op is associated with a tuple of valid arguments.
        # Some ops implement built-in type promotion.  These don't need autocasting,
        # but autocasting relies on their promotion, so we include tests to double-check.
        self.torch_expect_builtin_promote_bf16 = [
            ("eq", pointwise0_fp32 + pointwise1_bf16, torch.bool),
            ("ge", pointwise0_fp32 + pointwise1_bf16, torch.bool),
            ("gt", pointwise0_fp32 + pointwise1_bf16, torch.bool),
            ("le", pointwise0_fp32 + pointwise1_bf16, torch.bool),
            ("lt", pointwise0_fp32 + pointwise1_bf16, torch.bool),
            ("ne", pointwise0_fp32 + pointwise1_bf16, torch.bool),
            ("add", pointwise0_fp32 + pointwise1_bf16, torch.float32),
            ("div", pointwise0_fp32 + pointwise1_bf16, torch.float32),
            ("mul", pointwise0_fp32 + pointwise1_bf16, torch.float32),
        ]
        self.torch_expect_builtin_promote_fp16 = [
            ("eq", pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("ge", pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("gt", pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("le", pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("lt", pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("ne", pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("add", pointwise0_fp32 + pointwise1_fp16, torch.float32),
            ("div", pointwise0_fp32 + pointwise1_fp16, torch.float32),
            ("mul", pointwise0_fp32 + pointwise1_fp16, torch.float32),
        ]
        # Same promotions exercised via Tensor method/dunder syntax.
        self.methods_expect_builtin_promote_bf16 = [
            ("__eq__", pointwise0_fp32 + pointwise1_bf16, torch.bool),
            ("__ge__", pointwise0_fp32 + pointwise1_bf16, torch.bool),
            ("__gt__", pointwise0_fp32 + pointwise1_bf16, torch.bool),
            ("__le__", pointwise0_fp32 + pointwise1_bf16, torch.bool),
            ("__lt__", pointwise0_fp32 + pointwise1_bf16, torch.bool),
            ("__ne__", pointwise0_fp32 + pointwise1_bf16, torch.bool),
            ("__add__", pointwise0_fp32 + pointwise1_bf16, torch.float32),
            ("__div__", pointwise0_fp32 + pointwise1_bf16, torch.float32),
            ("__mul__", pointwise0_fp32 + pointwise1_bf16, torch.float32),
        ]
        self.methods_expect_builtin_promote_fp16 = [
            ("__eq__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("__ge__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("__gt__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("__le__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("__lt__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("__ne__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("__add__", pointwise0_fp32 + pointwise1_fp16, torch.float32),
            ("__div__", pointwise0_fp32 + pointwise1_fp16, torch.float32),
            ("__mul__", pointwise0_fp32 + pointwise1_fp16, torch.float32),
        ]
        # The remaining lists organize ops that autocast treats explicitly
        self.fft_fp32 = []
        self.special_fp32 = []
        self.linalg_fp32 = [
            ("linalg_matrix_rank", dummy_bf16[2]),
        ]
        self.blacklist_non_float_output_pass_test = []
        # The remaining lists organize ops that autocast treats explicitly for bf16.
        # Promote ops: mixed-dtype inputs must be promoted to a common dtype.
        self.torch_need_autocast_promote_bf16 = [
            ("cat", (pointwise0_bf16 + pointwise1_fp32,)),
            ("stack", (pointwise0_bf16 + pointwise1_fp32,)),
            (
                "index_copy",
                (
                    torch.tensor(
                        [[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.bfloat16
                    ),
                    0,
                    torch.tensor([0, 1, 2]),
                    torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float),
                ),
            ),
        ]
        # Ops expected to run in bf16 under autocast (fp32 inputs get cast down).
        self.torch_bf16 = [
            ("conv1d", conv_args_fp32[0]),
            ("conv2d", conv_args_fp32[1]),
            ("conv3d", conv_args_fp32[2]),
            ("conv_transpose1d", conv_args_fp32[0]),
            ("conv_transpose2d", conv_args_fp32[1]),
            ("conv_transpose3d", conv_args_fp32[2]),
            (
                "bmm",
                (
                    torch.randn((n, n, n), device=dev, dtype=torch.float32),
                    torch.randn((n, n, n), device=dev, dtype=torch.float32),
                ),
            ),
            ("mm", mat0_fp32 + mat1_fp32),
            (
                "baddbmm",
                (
                    torch.randn((n, n, n), device=dev, dtype=torch.float32),
                    torch.randn((n, n, n), device=dev, dtype=torch.float32),
                    torch.randn((n, n, n), device=dev, dtype=torch.float32),
                ),
            ),
            ("addmm", mat1_fp32 + mat2_fp32 + mat3_fp32),
            (
                "addbmm",
                mat0_fp32
                + (
                    torch.randn((n, n, n), device=dev, dtype=torch.float32),
                    torch.randn((n, n, n), device=dev, dtype=torch.float32),
                ),
            ),
            (
                "group_norm",
                (
                    torch.randn((4, 8, 10, 10), device=dev, dtype=torch.float32),
                    4,
                    torch.randn(8, device=dev, dtype=torch.float32),
                    torch.randn(8, device=dev, dtype=torch.float32),
                    1e-5,
                    True,
                ),
            ),
        ]
        # Ops returning multiple outputs that should run in bf16.
        self.torch_bf16_multi_output = [
            (
                "_native_multi_head_attention",
                (
                    torch.randn((1, 1, 768), device=dev, dtype=torch.float32),
                    torch.randn((1, 1, 768), device=dev, dtype=torch.float32),
                    torch.randn((1, 1, 768), device=dev, dtype=torch.float32),
                    768,
                    12,
                    torch.randn((2304, 768), device=dev, dtype=torch.float32),
                    torch.randn((2304), device=dev, dtype=torch.float32),
                    torch.randn((768, 768), device=dev, dtype=torch.float32),
                    torch.randn((768), device=dev, dtype=torch.float32),
                    None,
                    False,
                    True,
                    1,
                ),
            ),
            (
                "_native_multi_head_attention",
                (
                    torch.randn((1, 2, 768), device=dev, dtype=torch.float32),
                    torch.randn((1, 2, 768), device=dev, dtype=torch.float32),
                    torch.randn((1, 2, 768), device=dev, dtype=torch.float32),
                    768,
                    12,
                    torch.randn((2304, 768), device=dev, dtype=torch.float32),
                    torch.randn((2304), device=dev, dtype=torch.float32),
                    torch.randn((768, 768), device=dev, dtype=torch.float32),
                    torch.randn((768), device=dev, dtype=torch.float32),
                    torch.Tensor([[False, True]]),
                    False,
                    True,
                    1,
                ),
            ),
            (
                "_transform_bias_rescale_qkv",
                (
                    torch.randn((1, 96, 1536), device=dev, dtype=torch.float32),
                    torch.randn((1536), device=dev, dtype=torch.float32),
                    8,
                ),
            ),
        ]
        # Ops fed bf16 inputs but expected to run in fp32 under autocast.
        self.torch_bf16_fp32 = [
            ("_adaptive_avg_pool3d", dummy_bf16[3], {"output_size": (4, 4, 4)}),
        ]
        self.nn_bf16 = [
            ("linear", mat0_fp32 + mat1_fp32),
        ]
        self.nn_bf16_fp32 = [
            (
                "avg_pool3d",
                dummy_bf16[3],
                {"kernel_size": (3, 3, 3), "stride": (1, 1, 1)},
            ),
            ("adaptive_avg_pool3d", dummy_bf16[3], {"output_size": (4, 4, 4)}),
        ]
        self.torch_bf16_fp32_multi_output = []
        self.nn_bf16_fp32_multi_output = []
        # The remaining lists organize ops that autocast treats explicitly for fp16.
        self.torch_need_autocast_promote_fp16 = [
            ("cat", (pointwise0_fp16 + pointwise1_fp32,)),
            ("stack", (pointwise0_fp16 + pointwise1_fp32,)),
            (
                "index_copy",
                (
                    torch.tensor(
                        [[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float16
                    ),
                    0,
                    torch.tensor([0, 1, 2]),
                    torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float),
                ),
            ),
        ]
        self.torch_fp16 = []
        self.torch_fp16_fp32_multi_output = [
            (
                "_native_multi_head_attention",
                (
                    torch.randn((1, 1, 768), device=dev, dtype=torch.float16),
                    torch.randn((1, 1, 768), device=dev, dtype=torch.float16),
                    torch.randn((1, 1, 768), device=dev, dtype=torch.float16),
                    768,
                    12,
                    torch.randn((2304, 768), device=dev, dtype=torch.float16),
                    torch.randn((2304), device=dev, dtype=torch.float16),
                    torch.randn((768, 768), device=dev, dtype=torch.float16),
                    torch.randn((768), device=dev, dtype=torch.float16),
                    None,
                    False,
                    True,
                ),
            ),
            (
                "_native_multi_head_attention",
                (
                    torch.randn((1, 2, 768), device=dev, dtype=torch.float16),
                    torch.randn((1, 2, 768), device=dev, dtype=torch.float16),
                    torch.randn((1, 2, 768), device=dev, dtype=torch.float16),
                    768,
                    12,
                    torch.randn((2304, 768), device=dev, dtype=torch.float16),
                    torch.randn((2304), device=dev, dtype=torch.float16),
                    torch.randn((768, 768), device=dev, dtype=torch.float16),
                    torch.randn((768), device=dev, dtype=torch.float16),
                    torch.Tensor([[False, True]]),
                    False,
                    True,
                    1,
                ),
            ),
            (
                "_transform_bias_rescale_qkv",
                (
                    torch.randn((1, 96, 1536), device=dev, dtype=torch.float16),
                    torch.randn((1536), device=dev, dtype=torch.float16),
                    8,
                ),
            ),
        ]
        self.nn_fp16 = []
        # Ops fed fp16 inputs but expected to run in fp32 under autocast.
        self.torch_fp16_fp32 = [
            ("conv1d", conv_args_fp16[0]),
            ("conv2d", conv_args_fp16[1]),
            ("conv3d", conv_args_fp16[2]),
            ("conv_tbc", conv_args_fp16[0] + bias_fp16),
            (
                "_convolution",
                conv_args_fp32[1]
                + bias_fp16
                + ((1, 1), (0, 0), (1, 1), False, (0, 0), 1, False, True, True),
            ),
            (
                "bmm",
                (
                    torch.randn((n, n, n), device=dev, dtype=torch.float16),
                    torch.randn((n, n, n), device=dev, dtype=torch.float16),
                ),
            ),
            ("mm", mat0_fp16 + mat1_fp16),
            ("addmm", mat1_fp16 + mat2_fp16 + mat3_fp16),
            (
                "addbmm",
                mat0_fp16
                + (
                    torch.randn((n, n, n), device=dev, dtype=torch.float16),
                    torch.randn((n, n, n), device=dev, dtype=torch.float16),
                ),
            ),
            (
                "baddbmm",
                (
                    torch.randn((n, n, n), device=dev, dtype=torch.float16),
                    torch.randn((n, n, n), device=dev, dtype=torch.float16),
                    torch.randn((n, n, n), device=dev, dtype=torch.float16),
                ),
            ),
            ("matmul", mat0_fp16 + mat1_fp16),
            ("conv_transpose1d", conv_args_fp16[0]),
            ("conv_transpose2d", conv_args_fp16[1]),
            ("conv_transpose3d", conv_args_fp16[2]),
            (
                "group_norm",
                (
                    torch.randn((4, 8, 10, 10), device=dev, dtype=torch.float16),
                    4,
                    torch.randn(8, device=dev, dtype=torch.float16),
                    torch.randn(8, device=dev, dtype=torch.float16),
                    1e-5,
                    True,
                ),
            ),
            (
                "batch_norm",
                dummy_fp16[2],
                {
                    "weight": None,
                    "bias": None,
                    "running_mean": torch.rand((n), dtype=torch.float32),
                    "running_var": torch.rand((n), dtype=torch.float32),
                    "training": False,
                    "momentum": 0.1,
                    "eps": 1e-5,
                    "cudnn_enabled": False,
                },
            ),
            ("avg_pool1d", dummy_fp16[2], {"kernel_size": 3, "stride": 1}),
            (
                "max_pool1d",
                (torch.randn(10, 10, 10).to(torch.float16), 3, 2, 0, 1, False),
            ),
            (
                "max_pool2d",
                (torch.randn(10, 10, 10, 10).to(torch.float16), 3, 2, 0, 1, False),
            ),
            (
                "max_pool3d",
                (torch.randn(10, 10, 10, 10, 10).to(torch.float16), 3, 2, 0, 1, False),
            ),
            ("layer_norm", pointwise0_fp16 + ((pointwise0_fp16[0].numel(),),)),
            ("dropout", mat0_fp16 + (0.5,) + (False,)),
            ("softmax", pointwise0_fp16 + (0,)),
            ("log_softmax", pointwise0_fp16 + (0,)),
            ("cumsum", pointwise0_fp16 + (0,)),
            ("addcdiv", pointwise0_fp16 + pointwise1_fp16 + pointwise2_fp16),
            ("addcmul", pointwise0_fp16 + pointwise1_fp16 + pointwise2_fp16),
            (
                "_scaled_dot_product_attention_math",
                (
                    torch.randn((1, 1, 768), device=dev, dtype=torch.float16),
                    torch.randn((1, 1, 768), device=dev, dtype=torch.float16),
                    torch.randn((1, 1, 768), device=dev, dtype=torch.float16),
                ),
            ),
            ("topk", pointwise0_fp16 + (2,)),
            ("_adaptive_avg_pool3d", dummy_fp16[3], {"output_size": (4, 4, 4)}),
        ]
        self.nn_fp16_fp32 = [
            ("linear", mat0_fp16 + mat1_fp16),
            ("avg_pool2d", dummy_fp16[2], {"kernel_size": (3, 2), "stride": (1, 1)}),
            (
                "avg_pool3d",
                dummy_fp16[3],
                {"kernel_size": (3, 3, 3), "stride": (1, 1, 1)},
            ),
            (
                "scaled_dot_product_attention",
                (
                    torch.randn((1, 1, 768), device=dev, dtype=torch.float16),
                    torch.randn((1, 1, 768), device=dev, dtype=torch.float16),
                    torch.randn((1, 1, 768), device=dev, dtype=torch.float16),
                ),
            ),
            ("adaptive_avg_pool2d", dummy_fp16[2], {"output_size": (4, 4)}),
            ("adaptive_avg_pool3d", dummy_fp16[3], {"output_size": (4, 4, 4)}),
            ("upsample_nearest1d", dummy_fp16[2], {"output_size": (n)}),
            ("upsample_nearest2d", dummy_fp16[3], {"output_size": (n, n)}),
            ("upsample_nearest3d", dummy_fp16[4], {"output_size": (n, n, n)}),
            (
                "upsample_linear1d",
                dummy_fp16[2],
                {"output_size": (n), "align_corners": False},
            ),
            (
                "upsample_bilinear2d",
                dummy_fp16[3],
                {"output_size": (n, n), "align_corners": False},
            ),
            (
                "_upsample_bilinear2d_aa",
                dummy_fp16[3],
                {"output_size": (n, n), "align_corners": False},
            ),
            (
                "upsample_trilinear3d",
                dummy_fp16[4],
                {"output_size": (n, n, n), "align_corners": False},
            ),
        ]
        # Ops autocast leaves alone (fallthrough): bf16 in, bf16 out.
        self.torch_fallthrough_bf16 = [
            ("softmax", pointwise0_bf16 + (0,)),
            ("log_softmax", pointwise0_bf16 + (0,)),
            ("topk", pointwise0_bf16 + (2,)),
            ("layer_norm", pointwise0_bf16 + ((pointwise0_bf16[0].numel(),),)),
            ("dropout", mat0_bf16 + (0.5,) + (False,)),
            ("cumsum", pointwise0_bf16 + (0,)),
            ("addcdiv", pointwise0_bf16 + pointwise1_bf16 + pointwise2_bf16),
            ("addcmul", pointwise0_bf16 + pointwise1_bf16 + pointwise2_bf16),
            (
                "_scaled_dot_product_attention_math",
                (
                    torch.randn((1, 1, 768), device=dev, dtype=torch.bfloat16),
                    torch.randn((1, 1, 768), device=dev, dtype=torch.bfloat16),
                    torch.randn((1, 1, 768), device=dev, dtype=torch.bfloat16),
                ),
            ),
        ]
        self.nn_fallthrough_bf16 = [
            (
                "scaled_dot_product_attention",
                (
                    torch.randn((1, 1, 768), device=dev, dtype=torch.bfloat16),
                    torch.randn((1, 1, 768), device=dev, dtype=torch.bfloat16),
                    torch.randn((1, 1, 768), device=dev, dtype=torch.bfloat16),
                ),
            ),
            ("upsample_nearest1d", dummy_bf16[2], {"output_size": (n)}),
            ("upsample_nearest2d", dummy_bf16[3], {"output_size": (n, n)}),
            ("upsample_nearest3d", dummy_bf16[4], {"output_size": (n, n, n)}),
            (
                "upsample_linear1d",
                dummy_bf16[2],
                {"output_size": (n), "align_corners": False},
            ),
            (
                "upsample_bilinear2d",
                dummy_bf16[3],
                {"output_size": (n, n), "align_corners": False},
            ),
            (
                "_upsample_bilinear2d_aa",
                dummy_bf16[3],
                {"output_size": (n, n), "align_corners": False},
            ),
            (
                "upsample_trilinear3d",
                dummy_bf16[4],
                {"output_size": (n, n, n), "align_corners": False},
            ),
            ("adaptive_avg_pool2d", dummy_bf16[2], {"output_size": (4, 4)}),
        ]
| 22,097 | 42.671937 | 89 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_fpmath_mode.py | import unittest
import os
import subprocess
from torch.testing._internal.common_utils import TestCase
import intel_extension_for_pytorch as ipex
import itertools
from functools import wraps
def fpmath_mode_env(func):
    """Decorator for fp32-math-mode tests.

    Restores the default FP32 math mode after the wrapped test runs.  The
    reset is done in a ``finally`` block so that a failing test cannot leave
    BF32 mode enabled and pollute subsequent tests (the original version only
    reset on success).  The wrapper also forwards keyword arguments and
    propagates the wrapped function's return value.
    """

    @wraps(func)
    def wrapTheFunction(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        finally:
            # set the fp32_math_mode back to FP32 after each UT
            ipex.set_fp32_math_mode()

    return wrapTheFunction
class TestFPMathCases(TestCase):
    """Checks the FP32 math mode (FP32 vs. implicit BF32) knobs of IPEX.

    The bf32/strict/env tests launch ``fpmath_mode.py`` in a subprocess with
    ``DNNL_VERBOSE=1`` and grep the oneDNN verbose log for ``attr-fpmath:bf16``
    markers on convolution / inner_product / matmul / rnn primitives.
    """

    @fpmath_mode_env
    def test_set_and_get_fpmath(self):
        """set_fp32_math_mode/get_fp32_math_mode round-trip, and the default."""
        fpmath_mode = [ipex.FP32MathMode.BF32, ipex.FP32MathMode.FP32]
        for mode in fpmath_mode:
            ipex.set_fp32_math_mode(mode=mode, device="cpu")
            assert (
                ipex.get_fp32_math_mode() == mode
            ), "The return value of get_fpmath_mode is different from the value passed to set_fpmath_mode."
        ipex.set_fp32_math_mode()
        assert (
            ipex.get_fp32_math_mode() == ipex.FP32MathMode.FP32
        ), "The default fp32_math_mode should be FP32MathMode.FP32."

    @fpmath_mode_env
    def test_fpmath_bf32(self):
        """BF32 mode must tag conv/linear/matmul/rnn primitives with fpmath:bf16."""
        modes = ["jit", "imperative"]
        bias = [True, False]
        for mode, b in itertools.product(modes, bias):
            num1, num2, num3, num4, num5, num6, num7 = 0, 0, 0, 0, 0, 0, 0
            loc = os.path.dirname(os.path.abspath(__file__))
            cmd = 'DNNL_VERBOSE=1 python -u {}/fpmath_mode.py --mode="{}" --fpmath="BF32" --bias={}'.format(
                loc, mode, b
            )
            with subprocess.Popen(
                cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
            ) as p:
                for line in p.stdout.readlines():
                    line = str(line, "utf-8").strip()
                    # num1/num2/num3/num6 count forward primitives per kind;
                    # num4/num5/num7 count their backward counterparts.
                    if "attr-fpmath:bf16" in line and "convolution" in line:
                        num1 = num1 + 1
                        if "backward" in line:
                            num4 = num4 + 1
                    if "attr-fpmath:bf16" in line and "inner_product" in line:
                        num2 = num2 + 1
                        if "backward" in line:
                            num5 = num5 + 1
                    if "attr-fpmath:bf16" in line and "matmul" in line:
                        num3 = num3 + 1
                    if "attr-fpmath:bf16" in line and "rnn" in line:
                        num6 = num6 + 1
                        if "backward" in line:
                            num7 = num7 + 1
            assert (
                num1 > 0 and num2 > 0 and num3 > 0 and num6 > 0
            ), "The implicit FP32 to BF16 data type conversion failed to enable."
            if mode == "imperative":
                assert (
                    num4 > 0 and num5 > 0 and num3 >= 3 and num7 > 0
                ), "The implicit FP32 to BF16 data type conversion failed to enable in backward pass."

    @fpmath_mode_env
    def test_fpmath_strict(self):
        """Strict FP32 mode must emit no fpmath:bf16 markers at all."""
        modes = ["jit", "imperative"]
        bias = [True, False]
        for mode, b in itertools.product(modes, bias):
            num = 0
            loc = os.path.dirname(os.path.abspath(__file__))
            cmd = 'DNNL_VERBOSE=1 python -u {}/fpmath_mode.py --mode="{}" --fpmath="FP32" --bias={}'.format(
                loc, mode, b
            )
            with subprocess.Popen(
                cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
            ) as p:
                for line in p.stdout.readlines():
                    line = str(line, "utf-8").strip()
                    if "attr-fpmath:bf16" in line:
                        num = num + 1
            assert (
                num == 0
            ), "The implicit FP32 to BF16 data type conversion failed to disable."

    @fpmath_mode_env
    def test_env(self):
        """BF32 enabled via the IPEX_FP32_MATH_MODE environment variable."""
        os.environ["IPEX_FP32_MATH_MODE"] = "BF32"
        try:
            modes = ["jit", "imperative"]
            bias = [True, False]
            for mode, b in itertools.product(modes, bias):
                num1, num2, num3, num4 = 0, 0, 0, 0
                loc = os.path.dirname(os.path.abspath(__file__))
                cmd = 'DNNL_VERBOSE=1 python -u {}/fpmath_mode.py --mode="{}" --env --bias={}'.format(
                    loc, mode, b
                )
                with subprocess.Popen(
                    cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
                ) as p:
                    for line in p.stdout.readlines():
                        line = str(line, "utf-8").strip()
                        if "attr-fpmath:bf16" in line and "convolution" in line:
                            num1 = num1 + 1
                        if "attr-fpmath:bf16" in line and "inner_product" in line:
                            num2 = num2 + 1
                        if "attr-fpmath:bf16" in line and "matmul" in line:
                            num3 = num3 + 1
                        if "attr-fpmath:bf16" in line and "rnn" in line:
                            num4 = num4 + 1
                assert (
                    num1 > 0 and num2 > 0 and num3 > 0 and num4 > 0
                ), "The implicit FP32 to BF16 data type conversion failed to enable."
        finally:
            # Remove the env var so it cannot leak into the environments of
            # subprocesses launched by later tests (the original left it set).
            os.environ.pop("IPEX_FP32_MATH_MODE", None)
# Run the unittest suite when executed as a script (unittest.main() exits
# via SystemExit, so the assignment is effectively unused).
if __name__ == "__main__":
    test = unittest.main()
| 5,133 | 40.739837 | 108 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_merged_embeddingbag.py | import torch
import torch.nn as nn
import unittest
import copy
from torch.testing._internal.common_utils import TestCase
from intel_extension_for_pytorch.nn.modules import (
MergedEmbeddingBagWithSGD as MergedEmbeddingBagWithSGD,
)
from intel_extension_for_pytorch.nn.modules import MergedEmbeddingBag
class TestMergedEmbeddingBagWithSGD(TestCase):
    """Tests for MergedEmbeddingBagWithSGD: construction parity, index/offset
    linearization, inference, and the fused SGD update (with/without weight
    decay and with bf16 split weights).

    Fix vs. the previous revision: ``expected_indices_weight_for_update``
    contained the key ``160`` twice (a duplicate dict key silently overwrites
    the first), so linearized index ``170`` — which table2's third bag maps to
    in ``expected_input`` — was never checked.  The duplicate is now ``170: 1``.
    """

    # table 0, 1, 4 used for inference only
    # table 0, 1, 2, 3 used for other test except inference only
    table0 = nn.EmbeddingBag(100, 16, mode="mean").double()
    table1 = nn.EmbeddingBag(50, 32, mode="sum")
    table2 = nn.EmbeddingBag(50, 65, mode="mean", include_last_offset=True).bfloat16()
    table3 = nn.EmbeddingBag(
        18000000, 8, mode="mean", include_last_offset=True
    ).bfloat16()
    table4 = nn.EmbeddingBag(10, 8, mode="sum", include_last_offset=True).bfloat16()
    merged = MergedEmbeddingBagWithSGD.from_embeddingbag_list(
        [table0, table1, table2, table3]
    )
    # NOTE(review): table2 is declared as (50, 65) above but 8 is passed here;
    # presumably the explicit weight tensor takes precedence — confirm against
    # MergedEmbeddingBagWithSGD.__init__.
    merged2 = MergedEmbeddingBagWithSGD(
        [
            (100, 16, "mean", table0.weight.dtype, table0.weight.detach(), False),
            (50, 32, "sum", table1.weight.dtype, table1.weight.detach(), True),
            (50, 8, "mean", table2.weight.dtype, table2.weight.detach(), True),
            (18000000, 8, "mean", table3.weight.dtype, table3.weight.detach(), True),
        ]
    )
    inference_only_merged = MergedEmbeddingBagWithSGD.from_embeddingbag_list(
        [table0, table1, table4]
    )
    # Per-table (indices, offsets, include_last_offset) inputs prior to merging.
    input = [
        [
            torch.LongTensor([10, 10, 15, 10, 20, 25]),
            torch.LongTensor([[0, 30], [21, 15], [30, 11]]),
            torch.LongTensor([[0], [10], [20]]),
            torch.LongTensor([10, 15, 20, 17999999]),
        ],
        [torch.LongTensor([0, 1, 3]), None, None, torch.LongTensor([0, 2, 3, 4])],
        [t.include_last_offset for t in [table0, table1, table2, table3]],
    ]
    # (merged_indices, merged_offsets, merged_indices_with_row_offsets):
    # the third tensor holds indices shifted by each table's row offset
    # (table1 +100, table2 +150, table3 +200).
    expected_input = (
        torch.LongTensor(
            [
                10,
                10,
                15,
                10,
                20,
                25,
                0,
                30,
                21,
                15,
                30,
                11,
                0,
                10,
                20,
                10,
                15,
                20,
                17999999,
            ]
        ),
        torch.LongTensor([0, 1, 3, 6, 8, 10, 12, 13, 14, 15, 17, 18, 19]),
        torch.LongTensor(
            [
                10,
                10,
                15,
                10,
                20,
                25,
                100,
                130,
                121,
                115,
                130,
                111,
                150,
                160,
                170,
                210,
                215,
                220,
                18000199,
            ]
        ),
    )
    inference_only_input = [
        [
            torch.LongTensor([10, 10, 15, 10, 20, 25]),
            torch.LongTensor([[0, 30], [21, 15], [30, 11]]),
            torch.LongTensor([2, 5, 4, 9]),
        ],
        [torch.LongTensor([0, 1, 3]), None, torch.LongTensor([0, 2, 3, 4])],
        [
            table0.include_last_offset,
            table1.include_last_offset,
            table2.include_last_offset,
        ],
    ]
    inference_only_expected_input = (
        torch.LongTensor([10, 10, 15, 10, 20, 25, 0, 30, 21, 15, 30, 11, 2, 5, 4, 9]),
        torch.LongTensor([0, 1, 3, 6, 8, 10, 12, 14, 15, 16]),
        torch.LongTensor(
            [10, 10, 15, 10, 20, 25, 100, 130, 121, 115, 130, 111, 102, 105, 104, 109]
        ),
    )
    # Expected per-index gradient scale for the SGD step: mean-mode bags
    # contribute 1/bag_size per occurrence, sum-mode bags contribute 1.
    expected_indices_weight_for_update = {
        10: 1 + 1 / 2 + 1 / 3,
        15: 1 / 2,
        20: 1 / 3,
        25: 1 / 3,
        100: 1,
        111: 1,
        115: 1,
        121: 1,
        130: 2,
        150: 1,
        160: 1,
        170: 1,  # was a duplicate `160: 1`; table2's third bag maps to 170
        210: 1 / 2,
        215: 1 / 2,
        220: 1,
        18000199: 1,
    }

    def test_create_from_embedingbaglist_vs_create_from_init_function(self):
        """Both construction paths must yield identical weights/modes/dtypes."""
        self.assertEqual(self.merged.weights, self.merged2.weights)
        self.assertEqual(self.merged.pooling_modes, self.merged2.pooling_modes)
        self.assertEqual(self.merged.dtypes, self.merged2.dtypes)

    def test_input_prepare_function(self):
        """linearize_indices_and_offsets must produce the precomputed merge."""
        (
            merged_indices,
            merged_offsets,
            merged_indices_with_row_offsets,
        ) = self.merged.linearize_indices_and_offsets(*self.input)
        self.assertEqual(
            self.merged.linearize_indices_and_offsets(*self.input), self.expected_input
        )
        self.assertEqual(
            self.merged(self.expected_input, torch.BoolTensor([False])),
            self.merged2(self.input),
        )

    def _test_inference_only(self, model):
        """Compare merged-module outputs against the reference EmbeddingBags."""
        with torch.no_grad():
            outputs = model(
                self.inference_only_expected_input, torch.BoolTensor([False])
            )
            ref_out0 = self.table0(
                self.inference_only_input[0][0], self.inference_only_input[1][0]
            )
            ref_out1 = self.table1(
                self.inference_only_input[0][1], self.inference_only_input[1][1]
            )
            ref_out2 = self.table4(
                self.inference_only_input[0][2], self.inference_only_input[1][2]
            )
            self.assertEqual(outputs[0], ref_out0)
            self.assertEqual(outputs[1], ref_out1)
            self.assertEqual(outputs[2], ref_out2)

    def test_inference(self):
        """Inference path: eager, autocast(bf16) and a jit-traced module."""
        model = copy.deepcopy(self.inference_only_merged)
        self._test_inference_only(model)
        with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
            self._test_inference_only(model)
        with torch.no_grad():
            trace_model = torch.jit.trace(
                model, [self.inference_only_expected_input, torch.BoolTensor([False])]
            )
            self._test_inference_only(trace_model)

    def get_local_indice(self, indice):
        """Map a linearized (merged) index back to (table_id, local index)."""
        table_id = 0
        while indice >= self.merged.row_offsets[table_id + 1]:
            table_id += 1
        logical_indice = indice - self.merged.row_offsets[table_id].item()
        return table_id, logical_indice

    def test_training(self):
        """Fused backward must apply a plain SGD step (lr=0.01) per index."""
        model = copy.deepcopy(self.merged)
        outputs = model(self.expected_input, torch.BoolTensor([False]))
        loss = outputs[0].sum() + outputs[1].sum() + outputs[2].sum() + outputs[3].sum()
        default_lr = 0.01
        weights = copy.deepcopy(model.weights)
        loss.backward()
        updated_weights = model.weights
        for indice in self.expected_indices_weight_for_update:
            table_id, logical_indice = self.get_local_indice(indice)
            # grad will be all "1" for "sum"
            grad = (
                torch.ones(1, dtype=weights[table_id].dtype)
                * self.expected_indices_weight_for_update[indice]
            )
            ref_updated_weight = weights[table_id][logical_indice] - default_lr * grad
            self.assertEqual(
                updated_weights[table_id][logical_indice], ref_updated_weight
            )

    def test_training_with_weight_decay(self):
        """Fused step with weight decay must match the reference non-fused SGD."""
        import bench.custom_op_bench.optimizer

        sgd = bench.custom_op_bench.optimizer.non_fused_sgd
        model = MergedEmbeddingBagWithSGD.from_embeddingbag_list(
            [self.table0, self.table1, self.table2, self.table3],
            lr=1.0,
            weight_decay=0.1,
        )
        outputs = model(self.expected_input, torch.BoolTensor([False]))
        sgd_args = copy.deepcopy(model.sgd_args)
        loss = outputs[0].sum() + outputs[1].sum() + outputs[2].sum() + outputs[3].sum()
        weights = copy.deepcopy(model.weights)
        loss.backward()
        updated_weights = model.weights
        for indice in self.expected_indices_weight_for_update:
            table_id, logical_indice = self.get_local_indice(indice)
            # grad will be all "1" for "sum"
            ref_updated_weight = weights[table_id][logical_indice].detach().clone()
            grad = (
                torch.ones_like(ref_updated_weight, dtype=weights[table_id].dtype)
                * self.expected_indices_weight_for_update[indice]
            )
            sgd(
                ref_updated_weight,
                grad,
                None,
                0,  # momentum
                sgd_args.lr,
                sgd_args.weight_decay,
                0,  # dampening
                False,  # nestrov
            )
            self.assertEqual(
                updated_weights[table_id][logical_indice],
                ref_updated_weight,
                rtol=0.01,
                atol=0.01,
            )

    def test_cast_bfloat16(self):
        """to_bfloat16_train: fp32 weights split into bf16 body + bf16 trail."""
        model = copy.deepcopy(self.merged)
        model.to_bfloat16_train()
        w0 = torch.ops.torch_ipex.cat_bfloat16_float(
            model.weights[0], model.sgd_args.bf16_trail[0]
        )
        self.assertEqual(w0, self.table0.weight.float())
        w1 = torch.ops.torch_ipex.cat_bfloat16_float(
            model.weights[1], model.sgd_args.bf16_trail[1]
        )
        self.assertEqual(w1, self.table1.weight)
        # table2 was already bf16, so its trail must be all zeros.
        w2 = model.weights[2]
        self.assertEqual(w2, self.table2.weight)
        self.assertEqual(
            torch.zeros_like(w2, dtype=torch.bfloat16), model.sgd_args.bf16_trail[2]
        )
class TestMergedEmbedding(TestCase):
    """Tests for the plain (optimizer-free) MergedEmbeddingBag: construction
    parity, index/offset linearization, inference, and that autograd gradients
    match per-table nn.EmbeddingBag references."""

    table0 = nn.EmbeddingBag(100, 16, mode="mean", sparse=False).double()
    table1 = nn.EmbeddingBag(50, 32, mode="sum", sparse=False)
    table2 = nn.EmbeddingBag(
        18000000,
        128,
        mode="sum",
        include_last_offset=True,
        _weight=torch.empty(18000000, 128, dtype=torch.bfloat16),
        sparse=False,
    )
    table3 = nn.EmbeddingBag(100, 16, mode="mean", sparse=True).double()
    merged = MergedEmbeddingBag.from_embeddingbag_list([table0, table1, table2])
    merged2 = MergedEmbeddingBag(
        [
            (100, 16, "mean", table0.weight.dtype, table0.weight.detach(), False),
            (50, 32, "sum", table1.weight.dtype, table1.weight.detach(), False),
            (18000000, 128, "sum", table2.weight.dtype, table2.weight.detach(), False),
        ]
    )
    # Per-table (indices, offsets, include_last_offset) inputs prior to merging.
    input = [
        [
            torch.LongTensor([10, 10, 15, 10, 20, 25]),
            torch.LongTensor([[0, 30], [21, 15], [30, 11]]),
            torch.LongTensor([10, 15, 17999999]),
        ],
        [torch.LongTensor([0, 1, 3]), None, torch.LongTensor([0, 1, 2, 3])],
        [
            table0.include_last_offset,
            table1.include_last_offset,
            table2.include_last_offset,
        ],
    ]
    # (merged_indices, merged_offsets, merged_indices_with_row_offsets);
    # third tensor applies row offsets (+100 for table1, +150 for table2).
    expected_input = (
        torch.LongTensor(
            [10, 10, 15, 10, 20, 25, 0, 30, 21, 15, 30, 11, 10, 15, 17999999]
        ),
        torch.LongTensor([0, 1, 3, 6, 8, 10, 12, 13, 14, 15]),
        torch.LongTensor(
            [10, 10, 15, 10, 20, 25, 100, 130, 121, 115, 130, 111, 160, 165, 18000149]
        ),
    )
    # Expected gradient scale per linearized index: 1/bag_size per occurrence
    # for mean-mode bags, 1 per occurrence for sum-mode bags.
    expected_indices_weight_for_update = {
        10: 1 + 1 / 2 + 1 / 3,
        15: 1 / 2,
        20: 1 / 3,
        25: 1 / 3,
        100: 1,
        111: 1,
        115: 1,
        121: 1,
        130: 2,
        160: 1,
        165: 1,
        18000149: 1,
    }

    def test_create_from_embedingbaglist_vs_create_from_init_function(self):
        # Both construction paths must yield identical weights/modes/dtypes.
        self.assertEqual(self.merged.weights, self.merged2.weights)
        self.assertEqual(self.merged.pooling_modes, self.merged2.pooling_modes)
        self.assertEqual(self.merged.dtypes, self.merged2.dtypes)

    def test_input_prepare_function(self):
        # linearize_indices_and_offsets must produce the precomputed merge.
        (
            merged_indices,
            merged_offsets,
            merged_indices_with_row_offsets,
        ) = self.merged.linearize_indices_and_offsets(*self.input)
        self.assertEqual(
            self.merged.linearize_indices_and_offsets(*self.input), self.expected_input
        )
        self.assertEqual(
            self.merged(self.expected_input, torch.BoolTensor([False])),
            self.merged2(self.input),
        )

    def _test_inference_only(self, model):
        # Compare merged-module outputs against per-table references.
        with torch.no_grad():
            outputs = model(self.expected_input, torch.BoolTensor([False]))
            ref_out0 = self.table0(self.input[0][0], self.input[1][0])
            ref_out1 = self.table1(self.input[0][1], self.input[1][1])
            ref_out2 = self.table2(self.input[0][2], self.input[1][2])
            self.assertEqual(outputs[0], ref_out0)
            self.assertEqual(outputs[1], ref_out1)
            self.assertEqual(outputs[2], ref_out2)

    def test_inference(self):
        # Inference path: eager, autocast(bf16) and a jit-traced module.
        model = copy.deepcopy(self.merged)
        self._test_inference_only(model)
        with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
            self._test_inference_only(model)
        with torch.no_grad():
            trace_model = torch.jit.trace(
                model, [self.expected_input, torch.BoolTensor([False])]
            )
            self._test_inference_only(trace_model)

    def get_local_indice(self, indice):
        # Map a linearized (merged) index back to (table_id, local index).
        table_id = 0
        while indice >= self.merged.row_offsets[table_id + 1]:
            table_id += 1
        logical_indice = indice - self.merged.row_offsets[table_id].item()
        return table_id, logical_indice

    def test_training(self):
        # Forward outputs and autograd weight gradients must match the
        # per-table nn.EmbeddingBag references.
        model = copy.deepcopy(self.merged)
        outputs = model(self.expected_input, torch.BoolTensor([False]))
        loss = outputs[0].sum() + outputs[1].sum() + outputs[2].sum()
        loss.backward()
        input_ind_t0 = torch.LongTensor([10, 10, 15, 10, 20, 25])
        input_offset_t0 = torch.LongTensor([0, 1, 3])
        input_t1 = torch.LongTensor([[0, 30], [21, 15], [30, 11]])
        input_ind_t2 = torch.LongTensor([10, 15, 17999999])
        input_offset_t2 = torch.LongTensor([0, 1, 2, 3])
        out_t0 = self.table0(input_ind_t0, input_offset_t0)
        out_t1 = self.table1(input_t1)
        out_t2 = self.table2(input_ind_t2, input_offset_t2)
        self.assertEqual(outputs[0], out_t0)
        self.assertEqual(outputs[1], out_t1)
        self.assertEqual(outputs[2], out_t2)
        loss_naive = out_t0.sum() + out_t1.sum() + out_t2.sum()
        loss_naive.backward()
        self.assertEqual(self.table0.weight.grad, model.weights[0].grad)
        self.assertEqual(self.table1.weight.grad, model.weights[1].grad)
        self.assertEqual(self.table2.weight.grad, model.weights[2].grad)
# Run the unittest suite when executed as a script (unittest.main() exits
# via SystemExit, so the assignment is effectively unused).
if __name__ == "__main__":
    test = unittest.main()
| 14,533 | 34.622549 | 88 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_jit_llga_fuser.py | import os
import subprocess
import unittest
import itertools
import torch
import torch.nn as nn
import torch.nn.functional as F
from test_ao_jit_llga_utils import (
JitLlgaTestCase,
LLGA_FUSION_GROUP,
llga_fp32_bf16_test_env,
get_eltwise_fn,
)
from torch.testing._internal.common_utils import run_tests, TEST_SCIPY
import intel_extension_for_pytorch as ipex
# Probe for torchvision; some builds raise RuntimeError (not ImportError) at
# import time, so both failure modes are treated as "torchvision unavailable".
try:
    import torchvision
except (ImportError, RuntimeError):
    HAS_TORCHVISION = False
else:
    HAS_TORCHVISION = True

# Decorator that skips torchvision-dependent tests when it is not installed.
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
class TestOp(JitLlgaTestCase):
@llga_fp32_bf16_test_env
def test_conv2d(self):
for [
spatial,
in_channels,
out_channels,
kernel,
padding,
stride,
dilation,
g,
bias,
] in itertools.product(
[7, 8],
[8, 15],
[7, 16],
[3, 4],
[0, 2],
[1, 2],
[1, 2],
[1, 2],
[True, False],
):
m = nn.Conv2d(
in_channels=in_channels * g,
out_channels=out_channels * g,
kernel_size=kernel,
padding=padding,
stride=stride,
dilation=dilation,
groups=g,
bias=bias,
)
x = torch.rand(1, in_channels * g, spatial, spatial)
graph, _ = self.checkTrace(m, [x])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
@llga_fp32_bf16_test_env
def test_conv2d_script(self):
for bias in [True, False]:
m = nn.Conv2d(
in_channels=3,
out_channels=3,
kernel_size=3,
padding=1,
stride=1,
dilation=1,
groups=1,
bias=bias,
)
x = torch.rand(1, 3, 5, 5)
graph, _ = self.checkScript(m, [x])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
@llga_fp32_bf16_test_env
def test_bn2d(self):
m = nn.BatchNorm2d(32).eval()
x = torch.rand(1, 32, 28, 28)
graph, _ = self.checkTrace(m, [x])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
@llga_fp32_bf16_test_env
def test_eltwise(self):
class M(nn.Module):
def __init__(self, eltwise_fn):
super(M, self).__init__()
self.eltwise = eltwise_fn
def forward(self, x):
return self.eltwise(x)
for eltwise in ["relu", "gelu", "tanh", "sqrt", "square"]:
eltwise_fn = get_eltwise_fn(eltwise)
m = M(eltwise_fn)
x = torch.rand(1, 32, 28, 28)
graph, _ = self.checkTrace(m, [x])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
@llga_fp32_bf16_test_env
def test_max_pool2d(self):
for [
spatial,
kernel,
padding,
stride,
dilation,
ceil_mode,
] in itertools.product(
[15, 16, 17, 18, 19],
[4, 5],
[0, 1, 2],
[1, 2], # [1, 2, 4], TODO: fix issue in pad calculation
[1], # [1, 2], TODO: backend support for dilation
[True, False],
):
m = nn.MaxPool2d(
kernel_size=kernel,
stride=stride,
padding=padding,
dilation=dilation,
ceil_mode=ceil_mode,
)
x = torch.rand(1, 4, spatial, spatial)
graph, _ = self.checkTrace(m, [x])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
@llga_fp32_bf16_test_env
def test_avg_pool2d(self):
for [
spatial,
kernel,
padding,
stride,
ceil_mode,
count_include_pad,
] in itertools.product(
[15, 16, 17, 18, 19],
[4, 5],
[0, 1, 2],
[1, 2, 4],
[False], # TODO: DNNL does not fully support ceil_mode=True
[True, False],
):
m = nn.AvgPool2d(
kernel_size=kernel,
stride=stride,
padding=padding,
ceil_mode=ceil_mode,
count_include_pad=count_include_pad,
)
x = torch.rand(1, 4, spatial, spatial)
graph, _ = self.checkTrace(m, [x])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
@llga_fp32_bf16_test_env
@unittest.skipIf(True, "Enable once size peephole is supported")
def test_variable_kernel_avg_pool2d(self):
class M(nn.Module):
def __init__(self):
super(M, self).__init__()
def forward(self, x):
x = F.avg_pool2d(
x,
kernel_size=(x.size(2), x.size(3)),
padding=0,
count_include_pad=False,
)
return x
x = torch.randn(1, 1000, 1, 1)
m = M()
graph, _ = self.checkTrace(m, [x])
# kernel_size is not Constant, shouldn't have any LLGA_FUSION_GROUP
# TODO: with shape specialization, should have 1 LLGA_FUSION_GROUP
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 0)
@llga_fp32_bf16_test_env
def test_softmax(self):
for dim in [-4, -3, -2, -1, 0, 1, 2, 3]:
m = nn.Softmax(dim=dim)
x = torch.rand(8, 12, 12, 12)
graph, _ = self.checkTrace(m, [x])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
@llga_fp32_bf16_test_env
def test_softmax_different_output_dtype(self):
class M(nn.Module):
def __init__(self):
super(M, self).__init__()
def forward(self, x):
return torch.nn.functional.softmax(x, dim=3, dtype=torch.bfloat16)
m = M()
x = torch.rand(8, 12, 12, 12)
graph, _ = self.checkTrace(m, [x])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
def _gen_binary_inputs(self, gen_permute=True):
for xshape, yshape in [
[[1, 32, 28, 28], [1, 32, 28, 28]],
[[1, 32, 28, 28], [1, 1, 28, 28]],
[[1, 32, 28, 28], [28]],
[[1, 32, 28, 28], [1]],
]:
yield torch.rand(xshape), torch.rand(yshape)
if gen_permute and xshape != yshape:
yield torch.rand(yshape), torch.rand(xshape)
@llga_fp32_bf16_test_env
def test_add_with_alpha(self):
def forward_add(x, y):
return torch.add(x, y, alpha=2)
for x, y in self._gen_binary_inputs():
graph, _ = self.checkTrace(forward_add, [x, y])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
@llga_fp32_bf16_test_env
def test_add_scalar(self):
def add_scalar(x):
return 42 + x + 3.14
x = torch.rand(32, 32)
graph, _ = self.checkTrace(add_scalar, [x])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
@llga_fp32_bf16_test_env
def test_add_with_duplicated_input(self):
class M(nn.Module):
def __init__(self):
super(M, self).__init__()
self.pool1 = nn.AdaptiveAvgPool2d((5, 7))
self.pool2 = nn.AdaptiveAvgPool2d((5, 7))
def forward(self, x):
x1 = self.pool1(x)
x2 = self.pool2(x)
return x1 + x2
m = M()
x = torch.randn(1, 3, 4, 4)
graph, _ = self.checkTrace(m, [x])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
self.assertGraphContainsExactly(graph, "aten::adaptive_avg_pool2d", 1)
self.assertFused(graph, "aten::add")
@llga_fp32_bf16_test_env
@unittest.skipIf(True, "Disable mul due to bad performance")
def test_mul(self):
def forward_mul(x, y):
return torch.mul(x, y) * 3
for x, y in self._gen_binary_inputs():
graph, _ = self.checkTrace(forward_mul, [x, y])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 2)
@llga_fp32_bf16_test_env
def test_identity_binary(self):
def forward(x):
return x * 1 + 0.0
x = torch.rand(32)
graph, _ = self.checkTrace(forward, [x])
self.assertFused(graph, ["aten::add", "aten::mul"])
@llga_fp32_bf16_test_env
def test_matmul(self):
def forward_matmul(x, y):
return x.matmul(y)
# TODO: support all shapes combination
x = torch.randn(8, 128, 368)
y = torch.randn(368, 3072)
graph, _ = self.checkTrace(forward_matmul, [x, y])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
@llga_fp32_bf16_test_env
def test_mm(self):
def forward_mm(x, y):
return torch.mm(x, y)
x = torch.randn(2, 3)
y = torch.randn(3, 3)
graph, _ = self.checkTrace(forward_mm, [x, y])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
@llga_fp32_bf16_test_env
def test_layer_norm(self):
# TODO: support more normalized_shape
m = torch.nn.LayerNorm(10)
x = torch.randn(2, 5, 10, 10)
graph, _ = self.checkTrace(m, [x])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
@llga_fp32_bf16_test_env
def test_unsupported_layer_norm(self):
class M(nn.Module):
def __init__(self):
super(M, self).__init__()
def forward(self, x):
# The value of normalized_shape is dependent on the input
return F.layer_norm(x, x.shape)
x = torch.randn(2, 5, 10, 10)
m = M()
graph, _ = self.checkTrace(m, [x])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 0)
@llga_fp32_bf16_test_env
def test_cat(self):
def cat_along_dim(d):
def forward_cat(*inputs):
return torch.cat(inputs, d)
return forward_cat
for xshape in [
[8, 8, 8, 8],
[64, 8, 32],
[2048, 64],
]:
for d in range(len(xshape)):
x = torch.rand(xshape)
graph, _ = self.checkTrace(cat_along_dim(d), [x, x, x])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
@llga_fp32_bf16_test_env
def test_linear(self):
for freeze in [True, False]:
for bias in [True, False]:
x = torch.randn(32, 28)
m = torch.nn.Linear(in_features=28, out_features=64, bias=bias)
graph, _ = self.checkTrace(m, [x], freeze)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
self.assertFused(graph, ["aten::linear"])
@llga_fp32_bf16_test_env
def test_bmm(self):
class M(nn.Module):
def __init__(self):
super(M, self).__init__()
def forward(self, x, y):
return x.matmul(y)
x = torch.randn(128, 16, 384, 64)
y = torch.randn(128, 16, 64, 384)
m = M()
graph, _ = self.checkTrace(m, [x, y])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
self.assertFused(graph, ["aten::matmul"])
@llga_fp32_bf16_test_env
def test_bmm_mean(self):
class M(nn.Module):
def __init__(self):
super(M, self).__init__()
def forward(self, x, y):
z = x.matmul(y)
z = torch.mean(z, dim=0, keepdim=True)
return z
x = torch.randn(128, 16, 384, 64)
y = torch.randn(128, 16, 64, 384)
m = M()
graph, _ = self.checkTrace(m, [x, y])
# single op partitions
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 2)
@llga_fp32_bf16_test_env
def test_max(self):
class M(nn.Module):
def __init__(self):
super(M, self).__init__()
def forward(self, x, y):
return torch.max(x, y)
x = torch.randn(1, 3, 32, 32)
y = torch.randn(1, 3, 32, 32)
m = M()
graph, _ = self.checkTrace(m, [x, y])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
@llga_fp32_bf16_test_env
def test_bmm_div(self):
class M(nn.Module):
def __init__(self):
super(M, self).__init__()
def forward(self, x, y):
return x.matmul(y) / 2
x = torch.randn(128, 16, 384, 64)
y = torch.randn(128, 16, 64, 384)
m = M()
graph, _ = self.checkTrace(m, [x, y])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
self.assertFused(graph, ["aten::matmul", "aten::div"])
@llga_fp32_bf16_test_env
def test_bmm_div_add(self):
class M(nn.Module):
def __init__(self):
super(M, self).__init__()
def forward(self, x, y, z):
return x.matmul(y) / 2 + z
x = torch.randn(128, 16, 5, 64)
y = torch.randn(128, 16, 64, 5)
z = torch.randn(128, 1, 1, 5)
m = M()
graph, _ = self.checkTrace(m, [x, y, z])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
self.assertFused(graph, ["aten::matmul", "aten::div", "aten::add"])
@llga_fp32_bf16_test_env
def test_to(self):
class M(nn.Module):
def __init__(self, dtype):
super(M, self).__init__()
self.dtype = dtype
def forward(self, x):
return x.to(dtype=self.dtype)
for src_dtype, dst_dtype in [
[torch.bfloat16, torch.float],
[torch.float, torch.bfloat16],
]:
x = torch.randn((1, 16, 4, 64), dtype=src_dtype)
m = M(dst_dtype)
graph, _ = self.checkTrace(m, [x])
# we do not rewrite single to
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 0)
@llga_fp32_bf16_test_env
def test_typecheck(self):
x = torch.rand(32, 28)
m = torch.nn.Linear(in_features=28, out_features=64, bias=True)
graph, traced = self.checkTrace(m, [x])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
self.assertFused(graph, ["aten::linear"])
# change the shape of the input, we should enter fallback graph
x = torch.rand(5, 28)
self.assertEqual(m(x), traced(x))
@llga_fp32_bf16_test_env
def test_unsupported_dtype(self):
class M(nn.Module):
def __init__(self):
super(M, self).__init__()
def forward(self, x):
x = torch.fft.fftn(x)
x = torch.abs(x)
return x
x = torch.rand(10, 10, dtype=torch.complex64)
m = M()
graph, traced = self.checkTrace(m, [x])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 0)
self.assertGraphContainsExactly(graph, "aten::abs", 1)
@llga_fp32_bf16_test_env
# Currently graph with sub-block is unsupported
# %z : Tensor = prim::If(%8)
# block0():
# %z.7 : Tensor = aten::mul(%z.1, %y.1)
# -> (%z.7)
# block1():
# %z.13 : Tensor = aten::mul(%z.1, %x.1)
# -> (%z.13)
# return (%z)
def test_block(self):
class M(nn.Module):
def __init__(self):
super(M, self).__init__()
def forward(self, x, y, z):
if z[0][0] > 0:
z = z * y
else:
z = z * x
return z
x = torch.rand(10, 10)
y = torch.rand(10, 10)
z = torch.rand(10, 10)
m = M()
graph, scripted = self.checkScript(m, [x, y, z])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 0)
class TestFusionPattern(JitLlgaTestCase):
@llga_fp32_bf16_test_env
def test_conv2d_eltwise(self):
class M(nn.Module):
def __init__(self, eltwise_fn):
super(M, self).__init__()
self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.conv2 = nn.Conv2d(32, 32, 3, padding=1, bias=False)
self.eltwise = eltwise_fn
def forward(self, x):
x = self.conv1(x)
x = self.eltwise(x)
x = self.conv2(x)
x = self.eltwise(x)
return x
for eltwise in [
"relu",
"leaky_relu",
"sigmoid",
"round",
"abs",
"square",
"abs",
"round",
"exp",
"hardswish",
"tanh",
"hardtanh",
"mish",
]:
for inplace in [False, True]:
eltwise_fn_name = eltwise + "_" if inplace else eltwise
eltwise_fn = get_eltwise_fn(eltwise_fn_name)
m = M(eltwise_fn)
x = torch.rand(1, 32, 28, 28)
graph, _ = self.checkTrace(m, [x])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 2)
# test if relu_ is replace with relu by mutation removal pass
self.assertFused(graph, ["aten::" + eltwise_fn_name])
# test if relu is fused into the fusion group
self.assertFused(graph, ["aten::" + eltwise])
@unittest.skip("Accuracy issue for conv+relu+TypeCast and conv+bn+relu+TypeCast")
@llga_fp32_bf16_test_env
def test_type_promotion(self):
class M(nn.Module):
def __init__(
self,
):
super(M, self).__init__()
self.conv1 = nn.Conv2d(32, 32, 1)
self.bn1 = nn.BatchNorm2d(32)
self.conv2 = nn.Conv2d(32, 32, 1, dtype=torch.bfloat16)
self.bn2 = nn.BatchNorm2d(32, dtype=torch.bfloat16)
def forward(self, x, y):
y = self.conv2(y)
y = self.bn2(y)
y = torch.nn.functional.relu(y)
x = self.conv1(x)
x = self.bn1(x)
x = torch.nn.functional.relu(x)
z = y + x
return z
m = M()
x = torch.randn(3, 32, 32, 32)
y = torch.randn(3, 32, 32, 32, dtype=torch.bfloat16)
graph, _ = self.checkTrace(m, [x, y])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 2)
@llga_fp32_bf16_test_env
def test_conv2d_clamp(self):
class M(nn.Module):
def __init__(self):
super(M, self).__init__()
self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.conv2 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.conv3 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.conv4 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.conv5 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
def forward(self, x):
x = self.conv1(x)
x = torch.clamp(x, min=float("-inf"))
x = self.conv2(x)
x = torch.clamp(x, min=-5)
x = self.conv3(x)
x = torch.clamp(x, min=0, max=float("inf"))
x = self.conv4(x)
x = torch.clamp(x, min=1, max=5)
x = self.conv5(x)
x = torch.clamp(x, max=2)
return x
for inplace in [False, True]:
for memory_format in [torch.contiguous_format, torch.channels_last]:
x = torch.rand(1, 32, 28, 28).to(memory_format=memory_format)
m = M()
graph, _ = self.checkTrace(m, [x])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 5)
self.assertFused(graph, ["aten::_convolution", "aten::clamp"])
@llga_fp32_bf16_test_env
def test_ensure_tensor_is_rewrapped(self):
class M(nn.Module):
def __init__(self, eltwise_fn, data_type):
super(M, self).__init__()
self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True, dtype=data_type)
self.conv2 = nn.Conv2d(32, 32, 3, padding=1, bias=True, dtype=data_type)
self.conv3 = nn.Conv2d(32, 32, 3, padding=1, bias=True, dtype=data_type)
self.conv4 = nn.Conv2d(32, 32, 3, padding=1, bias=True, dtype=data_type)
self.eltwise = eltwise_fn
self.adaptive_avg_pool_2d = nn.AdaptiveAvgPool2d((5, 7))
def forward(self, x, y):
x = self.conv1(x)
x = self.eltwise(x)
x = self.conv2(x)
x = self.eltwise(x)
y = self.conv3(y)
y = self.eltwise(y)
y = self.conv4(y)
y = self.eltwise(y)
x = torch.add(x, y)
x = self.adaptive_avg_pool_2d(x)
return x
eltwise_fn_name = "relu"
eltwise_fn = get_eltwise_fn(eltwise_fn_name)
for data_type in [torch.bfloat16, torch.float]:
m = M(eltwise_fn, data_type)
m = m.to(memory_format=torch.channels_last)
x = torch.rand(1, 32, 28, 28, dtype=data_type).to(
memory_format=torch.channels_last
)
y = torch.rand(1, 32, 28, 28, dtype=data_type).to(
memory_format=torch.channels_last
)
# Simply test if the output is accurate
# The output of the fourth partition is input to adaptive_avg_pool2d, which is
# unsupported by LLGA. In resnext101 32x16d, we had encountered an accuracy issue.
# The UT checks that the input to adaptive_avg_pool_2d has not been wrapped by
# LlgaTensorImpl (assertEqual would fail in that case).
graph, _ = self.checkTrace(m, [x, y])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 4)
@llga_fp32_bf16_test_env
def test_conv2d_bn(self):
class M(nn.Module):
def __init__(self):
super(M, self).__init__()
self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.bn1 = nn.BatchNorm2d(32)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
return x
m = M().eval()
x = torch.rand(1, 32, 28, 28)
graph, _ = self.checkTrace(m, [x])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
self.assertFused(graph, ["aten::_convolution", "aten::batch_norm"])
@llga_fp32_bf16_test_env
def test_conv2d_bn_relu(self):
class M(nn.Module):
def __init__(self):
super(M, self).__init__()
self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.bn1 = nn.BatchNorm2d(32)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x)
return x
m = M().eval()
x = torch.rand(1, 32, 28, 28)
graph, _ = self.checkTrace(m, [x])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
self.assertFused(
graph, ["aten::_convolution", "aten::batch_norm", "aten::relu"]
)
@llga_fp32_bf16_test_env
def test_bn2d_eltwise(self):
class M(nn.Module):
def __init__(self, eltwise_fn):
super(M, self).__init__()
self.eltwise = eltwise_fn
self.bn = nn.BatchNorm2d(32)
def forward(self, x):
x = self.bn(x)
x = self.eltwise(x)
return x
for eltwise in ["relu"]:
eltwise_fn = get_eltwise_fn(eltwise)
m = M(eltwise_fn).eval()
x = torch.rand(1, 32, 28, 28)
graph, _ = self.checkTrace(m, [x])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
self.assertFused(graph, ["aten::batch_norm", "aten::" + eltwise])
@llga_fp32_bf16_test_env
def test_remove_redundant_to(self):
class M(nn.Module):
def __init__(
self,
):
super(M, self).__init__()
self.conv1 = nn.Conv2d(32, 32, 1)
self.bn1 = nn.BatchNorm2d(32)
def forward(self, x):
x = self.conv1(x)
x = x.to(torch.float32)
x = self.bn1(x)
x = nn.functional.relu(x)
return x
m = M()
x = torch.randn(3, 32, 32, 32)
graph, _ = self.checkTrace(m, [x])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
@llga_fp32_bf16_test_env
def test_do_not_map_select(self):
class M(nn.Module):
def __init__(
self,
):
super(M, self).__init__()
def forward(self, x, y):
z = y.expand_as(x)
x = torch.masked_fill(x, z, 1)
return x
m = M()
x = torch.randn(3, 32, 32, 32)
y = torch.randn(3, 32, 32, 1).to(torch.bool)
graph, _ = self.checkTrace(m, [x, y])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 0)
@llga_fp32_bf16_test_env
def test_avg_pool2d_add(self):
class M(nn.Module):
def __init__(self):
super(M, self).__init__()
self.pool1 = nn.AvgPool2d(
3, stride=1, padding=1, count_include_pad=False
)
self.pool2 = nn.AvgPool2d(
3, stride=1, padding=1, count_include_pad=False
)
def forward(self, x):
x1 = self.pool1(x)
x2 = self.pool2(x)
return x1 + x2
m = M()
x = torch.randn(1, 3, 4, 4)
graph, _ = self.checkTrace(m, [x])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
self.assertFused(graph, ["aten::avg_pool2d", "aten::add"])
@unittest.skip("Semi-Compiler unit-test")
@llga_fp32_bf16_test_env
def test_mha_pattern(self):
def forward_test(x, y, z, a):
tmp = torch.matmul(x, y) / 8.0 + a
tmp = torch.softmax(tmp, -1)
tmp = tmp.matmul(z)
tmp = torch.permute(tmp, (0, 2, 1, 3))
return tmp.contiguous()
x = torch.randn(128, 16, 384, 64)
y = torch.randn(128, 16, 64, 384)
z = torch.randn(128, 16, 384, 64)
a = torch.rand(128, 1, 1, 384)
graph, _ = self.checkTrace(forward_test, [x, y, z, a])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
self.assertFused(
graph,
[
"aten::matmul",
"aten::div",
"aten:add",
"aten:softmax",
"aten::permute",
"aten::contiguous",
],
)
@llga_fp32_bf16_test_env
def test_do_not_map_permute(self):
def forward_test(x, y, z, a):
tmp = torch.matmul(x, y) / 8.0 + a
tmp = torch.softmax(tmp, -1)
tmp = tmp.matmul(z)
temp = tmp.view(tmp.numel())
tmp = torch.permute(tmp, (0, 2, 1, 3))
temp.add_(-1)
return tmp.contiguous()
x = torch.randn(128, 16, 384, 64)
y = torch.randn(128, 16, 64, 384)
z = torch.randn(128, 16, 384, 64)
a = torch.rand(128, 1, 1, 384)
graph, _ = self.checkTrace(forward_test, [x, y, z, a])
self.assertFused(
graph,
[
"aten::matmul",
"aten::div",
"aten::add",
"aten::softmax",
"aten::contiguous",
],
)
@llga_fp32_bf16_test_env
def test_no_contiguous_no_op(self):
def forward(x):
return x.contiguous()
x = torch.rand(32, 28)
graph, traced = self.checkTrace(forward, [x])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 0)
@llga_fp32_bf16_test_env
def test_contiguous_mapping_padded(self):
def forward(x):
tmp = torch.as_strided(x, (15, 15), (16, 1))
return tmp.contiguous()
x = torch.rand(16, 16)
graph, traced = self.checkTrace(forward, [x])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
@llga_fp32_bf16_test_env
def test_contiguous_mapping_zero_stride(self):
def forward(x):
tmp = torch.as_strided(x, (32, 28), (0, 1))
return tmp.contiguous()
x = torch.rand(28, 32)
graph, traced = self.checkTrace(forward, [x])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
@llga_fp32_bf16_test_env
def test_linear_eltwise(self):
class M(nn.Module):
def __init__(self, eltwise_fn, bias):
super(M, self).__init__()
self.linear = nn.Linear(28, 64, bias)
self.eltwise = eltwise_fn
def forward(self, x):
x = self.linear(x)
x = self.eltwise(x)
return x
# TODO: use itertools.product once all combinations is supported
for [has_bias, eltwise] in [
[True, "relu"],
[False, "relu"],
[True, "gelu"],
[False, "gelu"],
[True, "sigmoid"],
[False, "sigmoid"],
[False, "hardtanh"],
# [False, 'relu6'], # TODO: map relu6 in the bridge
[False, "elu"],
]:
eltwise_fn = get_eltwise_fn(eltwise)
m = M(eltwise_fn, has_bias)
x = torch.rand(32, 28, requires_grad=False)
graph, _ = self.checkTrace(m, [x])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
self.assertFused(graph, ["aten::" + eltwise])
@llga_fp32_bf16_test_env
def test_conv2d_sum(self):
class M(nn.Module):
def __init__(self, bias=False):
super(M, self).__init__()
self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=bias)
self.bn1 = nn.BatchNorm2d(32)
self.conv2 = nn.Conv2d(32, 32, 3, padding=1, bias=bias)
self.bn2 = nn.BatchNorm2d(32)
self.relu = nn.ReLU()
self.conv3 = nn.Conv2d(32, 32, 3, padding=1, bias=bias)
self.bn3 = nn.BatchNorm2d(32)
def forward(self, x, y):
x = self.conv1(x)
x = self.bn1(x)
y = self.conv2(y)
y = self.bn2(y)
z = self.relu(x + y)
z = self.conv3(z)
z = self.bn3(z)
return z
for bias in [True, False]:
m = M(bias).eval()
x = torch.rand(1, 32, 16, 16, requires_grad=False)
y = torch.rand(1, 32, 16, 16, requires_grad=False)
graph, _ = self.checkTrace(m, [x, y])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 3)
@llga_fp32_bf16_test_env
def test_wildcard(self):
class M(nn.Module):
def __init__(self):
super(M, self).__init__()
self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.eltwise = nn.ReLU()
def forward(self, x):
x = self.conv1(x)
y = self.eltwise(x)
return [x, y]
# The pattern is as the following:
# conv
# | \
# eltwise \
# | \
# ListConstruct
#
# The output of conv is used by a wildcard op: ListConstruct.
# Thus conv-eltwise cannot be selected into the same Partition.
m = M()
x = torch.rand(1, 32, 28, 28)
graph, _ = self.checkTrace(m, [x])
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 2)
self.assertFused(graph, ["aten::_convolution", "aten::relu"])
class TestAPI(JitLlgaTestCase):
def test_weight_cache_api(self):
weight_cache_enabled_default_value = ipex._C._jit_llga_weight_cache_enabled()
self.assertTrue(weight_cache_enabled_default_value)
ipex._C._jit_set_llga_weight_cache_enabled(False)
weight_cache_enabled = ipex._C._jit_llga_weight_cache_enabled()
self.assertFalse(weight_cache_enabled)
# set the value back to the default one
ipex._C._jit_set_llga_weight_cache_enabled(weight_cache_enabled_default_value)
class TestDebugLog(JitLlgaTestCase):
def test_fusion_group_name(self):
num = 0
num_debug_str = 0
loc = os.path.dirname(os.path.abspath(__file__))
with subprocess.Popen(
'PYTORCH_JIT_LOG_LEVEL=":>>kernel:>>" python -u {}/profile_ipex_op.py --llga'.format(
loc
),
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
) as p:
for line in p.stdout.readlines():
line = str(line, 'utf-8').strip()
if line.__contains__("LLGA_bridge::prepareKernel"):
num += 1
if line.__contains__("Executing partition"):
num_debug_str += 1
self.assertTrue(num == 2, "IPEX LLGA op profiling info not found.")
self.assertTrue(num_debug_str > 0, "IPEX LLGA debug info not found")
@unittest.skip("Enable when integration with dynamo aot_autograd is more stable")
class TestDynamoAOT(JitLlgaTestCase):
def test_dynamo_aot_ts_onednn(self):
class Seq(nn.Module):
def __init__(self):
super().__init__()
self.layers = nn.Sequential(
nn.Linear(10, 10),
nn.ReLU(),
nn.Linear(10, 10),
nn.ReLU(),
)
def forward(self, x):
return self.layers(x)
mod = Seq()
import torch._dynamo
aot_mod = torch._dynamo.optimize("aot_ts", nopython=True)(mod)
for _ in range(10):
with torch.jit.fuser("fuser3"):
loss = aot_mod(torch.rand([10, 10])).sum()
loss.backward()
torch._dynamo.reset()
class TestModel(JitLlgaTestCase):
@skipIfNoTorchVision
@llga_fp32_bf16_test_env
def _test_vision(self, model_name):
m = getattr(torchvision.models, model_name)().eval()
x = torch.rand(1, 3, 224, 224) / 10
graph, _ = self.checkTrace(m, [x])
self.assertFused(
graph,
[
"aten::_convolution",
"aten::batch_norm",
"aten::relu",
"aten::linear",
"aten::avg_pool2d",
"aten::max_pool2d",
],
)
for model_name, enabled in [
["resnet50", True],
["resnext50_32x4d", True],
["resnext101_32x8d", True],
["densenet121", False],
["googlenet", TEST_SCIPY],
["mobilenet_v2", True],
["mnasnet1_0", True],
["squeezenet1_0", True],
["vgg16", True],
["alexnet", True],
["shufflenet_v2_x1_0", True],
["wide_resnet50_2", True],
]:
def wrapper(mname):
@unittest.skipIf(not enabled, "Disabled")
def test(self):
return self._test_vision(mname)
return test
setattr(TestModel, "test_vision_%s" % model_name, wrapper(model_name))
if __name__ == "__main__":
run_tests()
| 36,230 | 32.300551 | 98 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_auto_channels_last.py | import unittest
from common_utils import TestCase
import torch
import torch.nn as nn
import intel_extension_for_pytorch as ipex
from intel_extension_for_pytorch.utils.channels_last_1d import (
is_contiguous_channels_last_1d,
)
try:
import torchvision
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
class TestAutoChannelsLast(TestCase):
def _get_covnNd(self, dim):
class ConvNd(nn.Module):
def __init__(self, dim):
super(ConvNd, self).__init__()
if dim == 1:
self.conv = nn.Conv1d(16, 33, 3)
elif dim == 2:
self.conv = nn.Conv2d(16, 33, 3)
elif dim == 3:
self.conv = nn.Conv3d(16, 33, 3)
def forward(self, x):
x = self.conv(x)
return x
model = ConvNd(dim=dim)
return model
def _get_sequential_conv2d(self):
class Conv2d(nn.Module):
def __init__(self):
super(Conv2d, self).__init__()
self.conv1 = nn.Conv2d(16, 33, 3)
self.conv2 = nn.Conv2d(33, 33, 3)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
return x
model = Conv2d()
return model
def _get_covnNd_relu(self, dim):
class ConvNdReLU(nn.Module):
def __init__(self, dim):
super(ConvNdReLU, self).__init__()
if dim == 1:
self.conv = nn.Conv1d(16, 33, 3)
elif dim == 2:
self.conv = nn.Conv2d(16, 33, 3)
elif dim == 3:
self.conv = nn.Conv3d(16, 33, 3)
self.relu = nn.ReLU()
def forward(self, x):
x = self.conv(x)
x = self.relu(x)
return x
model = ConvNdReLU(dim=dim)
return model
def _get_covnNd_linear(self, dim):
class ConvNdLinear(nn.Module):
def __init__(self, dim):
super(ConvNdLinear, self).__init__()
if dim == 1:
self.conv = nn.Conv1d(16, 33, 3)
elif dim == 2:
self.conv = nn.Conv2d(16, 33, 3)
elif dim == 3:
self.conv = nn.Conv3d(16, 33, 3)
self.linear = nn.Linear(48, 48)
def forward(self, x):
x = self.conv(x)
x = self.linear(x)
return x
model = ConvNdLinear(dim=dim)
return model
def _get_ipex_optimized_model_and_output_tensor(
self, model, dim, disable_auto_channels_last=False
):
model.eval()
if dim == 1:
x = torch.randn(20, 16, 50)
elif dim == 2:
x = torch.randn(20, 16, 50, 50)
elif dim == 3:
x = torch.randn(20, 16, 50, 50, 50)
if disable_auto_channels_last:
ipex.disable_auto_channels_last()
model = ipex.optimize(model, weights_prepack=False)
output = model(x)
return model, output
def get_channels_last_modules(self, module):
channels_last_modules = []
for name, param in module.named_parameters():
if param.is_contiguous(memory_format=torch.channels_last):
channels_last_modules.append(name)
return channels_last_modules
def test_auto_channels_last(self):
model = self._get_covnNd(dim=1)
model, output = self._get_ipex_optimized_model_and_output_tensor(model, dim=1)
self.assertTrue(is_contiguous_channels_last_1d(model.conv.weight))
self.assertTrue(is_contiguous_channels_last_1d(output))
model = self._get_covnNd(dim=2)
model, output = self._get_ipex_optimized_model_and_output_tensor(model, dim=2)
self.assertTrue(
model.conv.weight.is_contiguous(memory_format=torch.channels_last)
)
self.assertTrue(output.is_contiguous(memory_format=torch.channels_last))
model = self._get_covnNd(dim=3)
model, output = self._get_ipex_optimized_model_and_output_tensor(model, dim=3)
self.assertTrue(
model.conv.weight.is_contiguous(memory_format=torch.channels_last_3d)
)
def test_disable_auto_channels_last(self):
model = self._get_covnNd(dim=1)
model, output = self._get_ipex_optimized_model_and_output_tensor(
model, dim=1, disable_auto_channels_last=True
)
self.assertTrue(
model.conv.weight.is_contiguous(memory_format=torch.contiguous_format)
)
self.assertTrue(output.is_contiguous(memory_format=torch.contiguous_format))
model = self._get_covnNd(dim=2)
model, output = self._get_ipex_optimized_model_and_output_tensor(
model, dim=2, disable_auto_channels_last=True
)
self.assertTrue(
model.conv.weight.is_contiguous(memory_format=torch.contiguous_format)
)
self.assertTrue(output.is_contiguous(memory_format=torch.contiguous_format))
model = self._get_covnNd(dim=3)
model, output = self._get_ipex_optimized_model_and_output_tensor(
model, dim=3, disable_auto_channels_last=True
)
self.assertTrue(
model.conv.weight.is_contiguous(memory_format=torch.contiguous_format)
)
self.assertTrue(output.is_contiguous(memory_format=torch.contiguous_format))
def test_auto_channels_last_recursion(self):
model = self._get_sequential_conv2d()
model, output = self._get_ipex_optimized_model_and_output_tensor(model, dim=2)
self.assertTrue(
model.conv1.weight.is_contiguous(memory_format=torch.channels_last)
)
self.assertTrue(
model.conv2.weight.is_contiguous(memory_format=torch.channels_last)
)
self.assertTrue(output.is_contiguous(memory_format=torch.channels_last))
def test_auto_channels_last_memory_format_propagation(self):
# memory format propagates through channels_last compatible layers
model = self._get_covnNd_relu(dim=1)
model, output = self._get_ipex_optimized_model_and_output_tensor(model, dim=1)
self.assertTrue(is_contiguous_channels_last_1d(model.conv.weight))
self.assertTrue(is_contiguous_channels_last_1d(output))
model = self._get_covnNd_relu(dim=2)
model, output = self._get_ipex_optimized_model_and_output_tensor(model, dim=2)
self.assertTrue(
model.conv.weight.is_contiguous(memory_format=torch.channels_last)
)
self.assertTrue(output.is_contiguous(memory_format=torch.channels_last))
model = self._get_covnNd_relu(dim=3)
model, output = self._get_ipex_optimized_model_and_output_tensor(model, dim=3)
self.assertTrue(
model.conv.weight.is_contiguous(memory_format=torch.channels_last_3d)
)
# memory format reverts back to contiguous_format as linear is channels_last incompatible
model = self._get_covnNd_linear(dim=1)
model, output = self._get_ipex_optimized_model_and_output_tensor(model, dim=1)
self.assertTrue(is_contiguous_channels_last_1d(model.conv.weight))
self.assertTrue(output.is_contiguous(memory_format=torch.contiguous_format))
model = self._get_covnNd_linear(dim=2)
model, output = self._get_ipex_optimized_model_and_output_tensor(model, dim=2)
self.assertTrue(
model.conv.weight.is_contiguous(memory_format=torch.channels_last)
)
self.assertTrue(output.is_contiguous(memory_format=torch.contiguous_format))
model = self._get_covnNd_linear(dim=3)
model, output = self._get_ipex_optimized_model_and_output_tensor(model, dim=3)
self.assertTrue(
model.conv.weight.is_contiguous(memory_format=torch.channels_last_3d)
)
self.assertTrue(output.is_contiguous(memory_format=torch.contiguous_format))
@skipIfNoTorchVision
def test_auto_channels_last_resnet50(self):
model = torchvision.models.resnet.resnet50(pretrained=False)
model.eval()
# manual
model_channels_last = model.to(memory_format=torch.channels_last)
model_channels_last = self.get_channels_last_modules(model_channels_last)
# auto
model_ipex = ipex.optimize(model, weights_prepack=False)
model_ipex_channels_last_modules = self.get_channels_last_modules(model_ipex)
self.assertEqual(model_channels_last, model_ipex_channels_last_modules)
    def test_auto_channels_last_for_int8(self):
        """Static int8 quantization must honor the auto-channels-last switch:
        with it disabled the traced model's output stays contiguous, with it
        enabled the 2d conv output comes back in channels_last."""
        conv_module = {1: torch.nn.Conv1d, 2: torch.nn.Conv2d, 3: torch.nn.Conv3d}

        class ConvNd(torch.nn.Module):
            # Minimal single-conv module, parameterized by spatial dimensionality.
            def __init__(self, dim, in_channels, out_channels, kernel_size, stride):
                super(ConvNd, self).__init__()
                self.conv = conv_module[dim](
                    in_channels, out_channels, kernel_size=kernel_size, stride=stride
                )

            def forward(self, x):
                return self.conv(x)

        def _test_conv(dim):
            # Full static-quantization pipeline: prepare -> calibrate ->
            # convert -> trace/freeze -> run; returns the final output tensor
            # so the caller can inspect its memory format.
            input_shapes = {1: (224,), 2: (224, 224), 3: (55, 55, 55)}
            x_shape = (2, 3) + input_shapes[dim]
            x = torch.randn(x_shape, dtype=torch.float32)
            model = ConvNd(dim, 3, 4, 3, 2).eval()
            qconfig = ipex.quantization.default_static_qconfig
            prepared_model = ipex.quantization.prepare(model, qconfig, x)
            # do calibration
            y = prepared_model(x)
            convert_model = ipex.quantization.convert(prepared_model)
            with torch.no_grad():
                traced_model = torch.jit.trace(convert_model, x)
                traced_model = torch.jit.freeze(traced_model)
                # Warm-up iterations so JIT fusion/profiling has settled.
                for _ in range(3):
                    y = traced_model(x)
            return y

        # disable auto channels_last
        ipex.disable_auto_channels_last()
        self.assertTrue(
            _test_conv(2).is_contiguous(memory_format=torch.contiguous_format)
        )
        self.assertTrue(
            _test_conv(3).is_contiguous(memory_format=torch.contiguous_format)
        )
        # enable auto channels_last
        ipex.enable_auto_channels_last()
        self.assertTrue(_test_conv(2).is_contiguous(memory_format=torch.channels_last))
        # temporary disable before https://github.com/pytorch/pytorch/pull/74023 merged
        # self.assertTrue(_test_conv(3).is_contiguous(memory_format = torch.channels_last_3d))
if __name__ == '__main__':
    # Run the whole test suite when this file is executed directly.
    test = unittest.main()
| 10,796 | 37.423488 | 97 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_frozen_batch_norm.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import unittest
from common_utils import TestCase
from intel_extension_for_pytorch.nn import FrozenBatchNorm2d
try:
import torchvision # noqa: F401
from torchvision.ops.misc import FrozenBatchNorm2d as FrozenBN2d
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
class FrozenBNTester(TestCase):
    """Compares ipex ``FrozenBatchNorm2d`` against torchvision's
    ``FrozenBatchNorm2d``: with identical running statistics, forward outputs
    and input gradients must agree, and channels_last inputs must keep their
    memory format through both forward and backward."""

    @skipIfNoTorchVision
    def test_frozen_batch_norm(self):
        m = FrozenBatchNorm2d(100)
        m1 = FrozenBN2d(100)
        running_mean = torch.randn(100)
        running_var = torch.randn(100)
        # Share the same (random) statistics between the two implementations.
        m.running_mean = running_mean
        m.running_var = running_var
        m1.running_mean = running_mean
        m1.running_var = running_var
        input = torch.randn(20, 100, 35, 45)
        x = input.clone().detach().requires_grad_()
        x1 = input.clone().detach().requires_grad_()
        y = m(x)
        y1 = m1(x1)
        self.assertTrue(y.dtype == torch.float32)
        self.assertEqual(y, y1)
        # backward
        y.mean().backward()
        y1.mean().backward()
        self.assertTrue(x.grad.dtype == torch.float32)
        self.assertEqual(x.grad, x1.grad)
        # test channels last: output and input gradient must stay channels_last.
        x2 = (
            input.clone()
            .detach()
            .to(memory_format=torch.channels_last)
            .requires_grad_()
        )
        y2 = m(x2)
        self.assertTrue(y2.dtype == torch.float32)
        self.assertTrue(y2.is_contiguous(memory_format=torch.channels_last))
        self.assertEqual(y2, y1)
        y2.mean().backward()
        self.assertTrue(x2.grad.dtype == torch.float32)
        self.assertTrue(x2.grad.is_contiguous(memory_format=torch.channels_last))
        self.assertEqual(x2.grad, x1.grad)

    @skipIfNoTorchVision
    def test_frozen_batch_norm_bfloat16(self):
        m = FrozenBatchNorm2d(100)
        m1 = FrozenBN2d(100)
        running_mean = torch.randn(100)
        running_var = torch.randn(100)
        m.running_mean = running_mean
        m.running_var = running_var
        m1.running_mean = running_mean
        m1.running_var = running_var
        with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
            input = torch.randn(20, 100, 35, 45).bfloat16()
            x = input.clone().detach().requires_grad_()
            x1 = input.clone().detach().requires_grad_()
            y = m(x)
            y1 = m1(x1)
            self.assertTrue(y.dtype == torch.bfloat16)
            # bf16 has far fewer mantissa bits, hence the looser tolerance.
            self.assertEqual(y, y1, prec=0.1)
            # backward
            y.mean().backward()
            y1.mean().backward()
            self.assertTrue(x.grad.dtype == torch.bfloat16)
            self.assertEqual(x.grad, x1.grad)
            # test channels last
            x2 = (
                input.clone()
                .detach()
                .to(memory_format=torch.channels_last)
                .requires_grad_()
            )
            y2 = m(x2)
            self.assertTrue(y2.dtype == torch.bfloat16)
            self.assertTrue(y2.is_contiguous(memory_format=torch.channels_last))
            self.assertEqual(y2, y1, prec=0.1)
            y2.mean().backward()
            self.assertTrue(x2.grad.dtype == torch.bfloat16)
            self.assertTrue(x2.grad.is_contiguous(memory_format=torch.channels_last))
            self.assertEqual(x2.grad, x1.grad)
if __name__ == "__main__":
test = unittest.main()
| 3,525 | 32.903846 | 85 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_mha.py | import unittest
import torch
import torch.nn as nn
import torch.nn.functional as F
import intel_extension_for_pytorch as ipex
import math
import copy
from common_utils import TestCase
# (from Diffusers 0.12.1)
class SD_MHA_Model_v1(nn.Module):
    """Self-attention block in the Diffusers 0.12.1 style: Q/K/V projections,
    per-head batch folding, baddbmm-based scaled scores, softmax, then bmm."""

    def __init__(self, scale, num_heads, weightsize, hiddensize):
        super(SD_MHA_Model_v1, self).__init__()
        self.scale = scale
        self.heads = num_heads
        self.weightsize = weightsize
        self.hiddensize = hiddensize
        self.query = nn.Linear(self.weightsize, self.hiddensize, bias=True)
        self.key = nn.Linear(self.weightsize, self.hiddensize, bias=True)
        self.value = nn.Linear(self.weightsize, self.hiddensize, bias=True)

    def batch_to_head_dim(self, tensor):
        """(B*H, S, D) -> (B, S, H*D): fold the head dimension back into features."""
        nh = self.heads
        bsz, seqlen, dim = tensor.shape
        folded = tensor.reshape(bsz // nh, nh, seqlen, dim)
        folded = folded.permute(0, 2, 1, 3)
        return folded.reshape(bsz // nh, seqlen, dim * nh)

    def head_to_batch_dim(self, tensor):
        """(B, S, H*D) -> (B*H, S, D): move heads into the batch dimension."""
        nh = self.heads
        bsz, seqlen, dim = tensor.shape
        split = tensor.reshape(bsz, seqlen, nh, dim // nh)
        split = split.permute(0, 2, 1, 3)
        return split.reshape(bsz * nh, seqlen, dim // nh)

    def get_attention_scores(self, query, key):
        """Softmax(scale * Q @ K^T), computed through baddbmm with beta=0 so the
        scratch tensor's (uninitialized) contents never contribute."""
        dtype = query.dtype
        scratch = torch.empty(
            query.shape[0],
            query.shape[1],
            key.shape[1],
            dtype=query.dtype,
            device=query.device,
        )
        scores = torch.baddbmm(
            scratch,
            query,
            key.transpose(-1, -2),
            beta=0,
            alpha=self.scale,
        )
        probs = scores.softmax(dim=-1)
        return probs.to(dtype)

    def forward(self, x):
        q = self.head_to_batch_dim(self.query(x))
        k = self.head_to_batch_dim(self.key(x))
        v = self.head_to_batch_dim(self.value(x))
        probs = self.get_attention_scores(q, k)
        return self.batch_to_head_dim(torch.bmm(probs, v))
# (from Diffusers 0.12.1)
class SD_MHA_Model_v2(nn.Module):
    """Cross-attention variant of SD_MHA_Model_v1: queries come from ``x``
    while keys and values come from a second input ``y``."""

    def __init__(self, scale, num_heads, weightsize, hiddensize):
        super(SD_MHA_Model_v2, self).__init__()
        self.scale = scale
        self.heads = num_heads
        self.weightsize = weightsize
        self.hiddensize = hiddensize
        self.query = nn.Linear(self.weightsize, self.hiddensize, bias=True)
        self.key = nn.Linear(self.weightsize, self.hiddensize, bias=True)
        self.value = nn.Linear(self.weightsize, self.hiddensize, bias=True)

    def batch_to_head_dim(self, tensor):
        """(B*H, S, D) -> (B, S, H*D)."""
        nh = self.heads
        bsz, seqlen, dim = tensor.shape
        folded = tensor.reshape(bsz // nh, nh, seqlen, dim)
        return folded.permute(0, 2, 1, 3).reshape(bsz // nh, seqlen, dim * nh)

    def head_to_batch_dim(self, tensor):
        """(B, S, H*D) -> (B*H, S, D)."""
        nh = self.heads
        bsz, seqlen, dim = tensor.shape
        split = tensor.reshape(bsz, seqlen, nh, dim // nh)
        return split.permute(0, 2, 1, 3).reshape(bsz * nh, seqlen, dim // nh)

    def get_attention_scores(self, query, key):
        """Softmax(scale * Q @ K^T) via baddbmm (beta=0 ignores the scratch)."""
        dtype = query.dtype
        scratch = torch.empty(
            query.shape[0],
            query.shape[1],
            key.shape[1],
            dtype=query.dtype,
            device=query.device,
        )
        scores = torch.baddbmm(
            scratch,
            query,
            key.transpose(-1, -2),
            beta=0,
            alpha=self.scale,
        )
        return scores.softmax(dim=-1).to(dtype)

    def forward(self, x, y):
        q = self.head_to_batch_dim(self.query(x))
        k = self.head_to_batch_dim(self.key(y))
        v = self.head_to_batch_dim(self.value(y))
        probs = self.get_attention_scores(q, k)
        return self.batch_to_head_dim(torch.bmm(probs, v))
# (from Diffusers 0.13)
class SD_MHA_Model_v3(nn.Module):
    """Self-attention in the Diffusers 0.13 style, delegating the attention
    math to ``F.scaled_dot_product_attention`` (default scaling)."""

    def __init__(self, num_heads, weightsize, hiddensize):
        super(SD_MHA_Model_v3, self).__init__()
        self.heads = num_heads
        self.weightsize = weightsize
        self.hiddensize = hiddensize
        self.query = nn.Linear(self.weightsize, self.hiddensize, bias=True)
        self.key = nn.Linear(self.weightsize, self.hiddensize, bias=True)
        self.value = nn.Linear(self.weightsize, self.hiddensize, bias=True)

    def forward(self, x):
        bsz, _, inner_dim = x.shape
        head_dim = inner_dim // self.heads

        def split_heads(t):
            # (B, S, H*D) -> (B, H, S, D)
            return t.view(bsz, -1, self.heads, head_dim).transpose(1, 2)

        q = split_heads(self.query(x))
        k = split_heads(self.key(x))
        v = split_heads(self.value(x))
        ctx = F.scaled_dot_product_attention(
            q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False
        )
        ctx = ctx.transpose(1, 2).reshape(bsz, -1, self.heads * head_dim)
        return ctx.to(q.dtype)
# (from Diffusers 0.13)
class SD_MHA_Model_scale_v3(nn.Module):
def __init__(self, num_heads, weightsize, hiddensize, scale):
super(SD_MHA_Model_scale_v3, self).__init__()
self.heads = num_heads
self.weightsize = weightsize
self.hiddensize = hiddensize
self.scale = scale
self.query = nn.Linear(self.weightsize, self.hiddensize, bias=True)
self.key = nn.Linear(self.weightsize, self.hiddensize, bias=True)
self.value = nn.Linear(self.weightsize, self.hiddensize, bias=True)
def forward(self, x):
query = self.query(x)
key = self.key(x)
value = self.value(x)
batch_size, sequence_length, inner_dim = x.shape
head_dim = inner_dim // self.heads
query = query.view(batch_size, -1, self.heads, head_dim).transpose(1, 2)
key = key.view(batch_size, -1, self.heads, head_dim).transpose(1, 2)
value = value.view(batch_size, -1, self.heads, head_dim).transpose(1, 2)
hidden_states = F.scaled_dot_product_attention(
query,
key,
value,
attn_mask=None,
dropout_p=0.0,
is_causal=False,
scale=self.scale,
)
hidden_states = hidden_states.transpose(1, 2).reshape(
batch_size, -1, self.heads * head_dim
)
output = hidden_states.to(query.dtype)
return output
# (from Diffusers 0.13)
class SD_MHA_Model_v4(nn.Module):
    """Cross-attention variant of SD_MHA_Model_v3: queries from ``x``,
    keys/values from ``y``, attention via ``F.scaled_dot_product_attention``."""

    def __init__(self, num_heads, weightsize, hiddensize):
        super(SD_MHA_Model_v4, self).__init__()
        self.heads = num_heads
        self.weightsize = weightsize
        self.hiddensize = hiddensize
        self.query = nn.Linear(self.weightsize, self.hiddensize, bias=True)
        self.key = nn.Linear(self.weightsize, self.hiddensize, bias=True)
        self.value = nn.Linear(self.weightsize, self.hiddensize, bias=True)

    def forward(self, x, y):
        bsz, _, inner_dim = x.shape
        head_dim = inner_dim // self.heads

        def split_heads(t):
            # (B, S, H*D) -> (B, H, S, D)
            return t.view(bsz, -1, self.heads, head_dim).transpose(1, 2)

        q = split_heads(self.query(x))
        k = split_heads(self.key(y))
        v = split_heads(self.value(y))
        ctx = F.scaled_dot_product_attention(
            q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False
        )
        ctx = ctx.transpose(1, 2).reshape(bsz, -1, self.heads * head_dim)
        return ctx.to(q.dtype)
# (from Diffusers 0.13)
class SD_MHA_Model_scale_v4(nn.Module):
def __init__(self, num_heads, weightsize, hiddensize, scale):
super(SD_MHA_Model_scale_v4, self).__init__()
self.heads = num_heads
self.weightsize = weightsize
self.hiddensize = hiddensize
self.scale = scale
self.query = nn.Linear(self.weightsize, self.hiddensize, bias=True)
self.key = nn.Linear(self.weightsize, self.hiddensize, bias=True)
self.value = nn.Linear(self.weightsize, self.hiddensize, bias=True)
def forward(self, x, y):
query = self.query(x)
key = self.key(y)
value = self.value(y)
batch_size, sequence_length, inner_dim = x.shape
head_dim = inner_dim // self.heads
query = query.view(batch_size, -1, self.heads, head_dim).transpose(1, 2)
key = key.view(batch_size, -1, self.heads, head_dim).transpose(1, 2)
value = value.view(batch_size, -1, self.heads, head_dim).transpose(1, 2)
hidden_states = F.scaled_dot_product_attention(
query,
key,
value,
attn_mask=None,
dropout_p=0.0,
is_causal=False,
scale=self.scale,
)
hidden_states = hidden_states.transpose(1, 2).reshape(
batch_size, -1, self.heads * head_dim
)
output = hidden_states.to(query.dtype)
return output
# (Fake Diffusers Model - Fall back to ipex::mha_scores_calc)
class Fake_SD_MHA_Model(nn.Module):
    """Parameter-free attention core: softmax((Q/sqrt(d)) @ K^T + bias) @ V."""

    def __init__(self, dim_per_head, softmax_dim=-1):
        super(Fake_SD_MHA_Model, self).__init__()
        self.softmax = nn.Softmax(dim=softmax_dim)
        self.dim_per_head = dim_per_head

    def forward(self, mat1, mat2, mat3, bias):
        scaled_q = mat1 / math.sqrt(self.dim_per_head)
        logits = torch.matmul(scaled_q, mat2.transpose(2, 3)) + bias
        weights = self.softmax(logits)
        return torch.matmul(weights, mat3)
class MHA_Model_BERT(nn.Module):
    """BERT-style MHA whose permute/transpose axes are constructor arguments,
    so tests can build both fusable and deliberately non-fusable layouts."""

    def __init__(self, scale, num_heads, head_dims, permute_idx, trans_a, trans_b):
        super(MHA_Model_BERT, self).__init__()
        self.scale = scale
        self.num_heads = num_heads
        self.head_dims = head_dims
        self.embed_dims = self.num_heads * self.head_dims
        self.query = nn.Linear(self.embed_dims, self.embed_dims, bias=True)
        self.key = nn.Linear(self.embed_dims, self.embed_dims, bias=True)
        self.value = nn.Linear(self.embed_dims, self.embed_dims, bias=True)
        self.permute_idx = permute_idx
        self.trans_a = trans_a
        self.trans_b = trans_b

    def transpose_for_scores(self, x):
        """Split the last dim into (heads, head_dims) and apply permute_idx."""
        split_shape = x.size()[:-1] + (self.num_heads, self.head_dims)
        return x.view(split_shape).permute(self.permute_idx)

    def forward(self, x, mask):
        q = self.transpose_for_scores(self.query(x))
        k = self.transpose_for_scores(self.key(x)).transpose(self.trans_a, self.trans_b)
        v = self.transpose_for_scores(self.value(x))
        # Additive mask is applied after the scaled QK^T scores.
        logits = torch.matmul(q, k) / self.scale + mask
        probs = nn.functional.softmax(logits, dim=-1)
        ctx = torch.matmul(probs, v).permute(self.permute_idx).contiguous()
        merged_shape = ctx.size()[:-2] + (self.embed_dims,)
        return ctx.view(merged_shape)
class MHA_Model_Distil(nn.Module):
    """DistilBERT-style MHA with configurable transpose axes and a
    configurable padding fill value for the masked positions."""

    def __init__(
        self,
        scale,
        num_heads,
        head_dims,
        trans_a,
        trans_b,
        trans_c,
        fill_value=-float("inf"),
    ):
        super(MHA_Model_Distil, self).__init__()
        self.scale = scale
        self.n_head = num_heads
        self.head_dims = head_dims
        self.dim = self.n_head * self.head_dims
        self.q_lin = nn.Linear(self.dim, self.dim, bias=True)
        self.k_lin = nn.Linear(self.dim, self.dim, bias=True)
        self.v_lin = nn.Linear(self.dim, self.dim, bias=True)
        self.trans_a = trans_a
        self.trans_b = trans_b
        self.trans_c = trans_c
        self.fill_value = fill_value

    def forward(self, x, mask):
        bs, q_length, dim = x.size()
        k_length = x.size(1)

        def split_heads(t: torch.Tensor) -> torch.Tensor:
            """separate heads"""
            return t.view(bs, -1, self.n_head, self.head_dims).transpose(
                self.trans_a, self.trans_b
            )

        def merge_heads(t: torch.Tensor) -> torch.Tensor:
            """group heads"""
            merged = t.transpose(self.trans_a, self.trans_b).contiguous()
            return merged.view(bs, -1, self.n_head * self.head_dims)

        q = split_heads(self.q_lin(x)) / self.scale
        k = split_heads(self.k_lin(x))
        v = split_heads(self.v_lin(x))
        scores = torch.matmul(q, k.transpose(self.trans_b, self.trans_c))
        # Positions where mask == 0 are padded out with fill_value before softmax.
        pad = (mask == 0).view(bs, 1, 1, k_length).expand_as(scores)
        scores = scores.masked_fill(pad, self.fill_value)
        weights = nn.functional.softmax(scores, dim=-1)
        return merge_heads(torch.matmul(weights, v))
class MHA_Model_ViT(nn.Module):
    """ViT-style MHA using a fused QKV projection; permute/transpose/select
    indices are constructor arguments so layouts can be varied by tests.
    Note the stored scale is the reciprocal of the ``scale`` argument."""

    def __init__(
        self,
        scale,
        num_heads,
        head_dims,
        permute_idx,
        trans_a,
        trans_b,
        select_a,
        select_b,
    ):
        super(MHA_Model_ViT, self).__init__()
        self.scale = 1.0 / scale
        self.num_heads = num_heads
        self.head_dims = head_dims
        self.embed_dims = self.num_heads * self.head_dims
        self.qkv = nn.Linear(self.embed_dims, self.embed_dims * 3, bias=True)
        self.permute_idx = permute_idx
        self.trans_a = trans_a
        self.trans_b = trans_b
        self.select_a = select_a
        self.select_b = select_b

    def forward(self, x):
        bsz, tokens, _ = x.shape
        packed = self.qkv(x)
        packed = packed.reshape(bsz, tokens, 3, self.num_heads, self.head_dims)
        packed = packed.permute(self.permute_idx)
        q = packed[0]
        k = packed[self.select_a]
        v = packed[self.select_b]
        attn = (q @ k.transpose(self.trans_a, self.trans_b)) * self.scale
        attn = attn.softmax(dim=-1)
        out = (attn @ v).transpose(self.select_a, self.select_b)
        return out.reshape(bsz, tokens, self.embed_dims)
# Sweep configurations shared by the TransFreeMHATester cases below; the i-th
# entry of each list forms one (batch, seq, scale, heads, head_dim) combination.
bs = [5, 3, 11]  # batch sizes
seq = [128, 384, 31]  # sequence lengths
scales = [8, 13, 21]  # score divisors passed to the models
num_heads = [12, 16, 29]  # attention head counts
head_dims = [64, 96, 17]  # per-head feature dimensions
# In this UT case, "+15" is desgined to trigger the overflow of SoftMax when using pos_FLT_MIN.
# Since the input values are very large for the BMM and SoftMax, the resulting accumulations of MHA
# result will also be large, thus the tolerance value should be set to 1.5e-0 for such case.
class TransFreeMHATester(TestCase):
    def sd_mha_bf16_common(self, model, mat1, mat2=None):
        """Shared bf16 driver for the SD MHA models: optimize, trace, freeze,
        warm up twice, compare jit vs eager output, and require the fused
        ipex::sd_flash_mha node in the final graph.

        The neg_FLT_MIN=True pass shifts inputs by +15 to provoke large
        softmax inputs (see the module comment above), hence the much looser
        1.5e-0 tolerance on that pass.
        """
        for neg_FLT_MIN in [True, False]:
            sd_mha_model = copy.deepcopy(model)
            if mat2 is not None:
                inputs = (
                    (mat1.to(torch.bfloat16), mat2.to(torch.bfloat16))
                    if not neg_FLT_MIN
                    else (
                        (mat1 + 15).to(torch.bfloat16),
                        (mat2 + 15).to(torch.bfloat16),
                    )
                )
            else:
                inputs = (
                    (mat1.to(torch.bfloat16),)
                    if not neg_FLT_MIN
                    else ((mat1 + 15).to(torch.bfloat16),)
                )
            mha_ipex = ipex.optimize(sd_mha_model, dtype=torch.bfloat16, level="O1")
            with torch.cpu.amp.autocast(), torch.no_grad():
                mha_ipex = torch.jit.trace(mha_ipex, inputs)
                mha_ipex = torch.jit.freeze(mha_ipex)
                # Two warm-up runs so JIT profiling/fusion has settled.
                for _ in range(2):
                    mha_jit = mha_ipex(*inputs)
                mha_ref = sd_mha_model(*inputs)
                self.assertEqual(mha_ref, mha_jit, prec=1.5e-0 if neg_FLT_MIN else 1e-2)
                mha_graph = mha_ipex.graph_for(*inputs)
                self.assertTrue(
                    any(n.kind() == "ipex::sd_flash_mha" for n in mha_graph.nodes())
                )
    # Each of the following cases feeds one SD MHA model variant through the
    # shared sd_mha_bf16_common driver. Shapes (2, 4096, 320) / (2, 77, 320)
    # match typical Stable Diffusion self-/cross-attention activations.
    def test_sd_mha_bf16_v1(self):
        mat = torch.randn(2, 4096, 320)
        sd_mha_model = SD_MHA_Model_v1(0.3, 8, 320, 320).eval()
        self.sd_mha_bf16_common(sd_mha_model, mat)

    def test_sd_mha_bf16_v2(self):
        mat1 = torch.randn(2, 4096, 320)
        mat2 = torch.randn(2, 77, 320)
        sd_mha_model = SD_MHA_Model_v2(0.3, 8, 320, 320).eval()
        self.sd_mha_bf16_common(sd_mha_model, mat1, mat2)

    def test_sd_mha_bf16_v3(self):
        mat = torch.randn(2, 4096, 320)
        sd_mha_model = SD_MHA_Model_v3(8, 320, 320).eval()
        self.sd_mha_bf16_common(sd_mha_model, mat)

    def test_sd_mha_bf16_scale_v3(self):
        mat = torch.randn(2, 4096, 320)
        sd_mha_model = SD_MHA_Model_scale_v3(8, 320, 320, 0.3).eval()
        self.sd_mha_bf16_common(sd_mha_model, mat)

    def test_sd_mha_bf16_v4(self):
        mat1 = torch.randn(2, 4096, 320)
        mat2 = torch.randn(2, 77, 320)
        sd_mha_model = SD_MHA_Model_v4(8, 320, 320).eval()
        self.sd_mha_bf16_common(sd_mha_model, mat1, mat2)

    def test_sd_mha_bf16_scale_v4(self):
        mat1 = torch.randn(2, 4096, 320)
        mat2 = torch.randn(2, 77, 320)
        sd_mha_model = SD_MHA_Model_scale_v4(8, 320, 320, 0.11).eval()
        self.sd_mha_bf16_common(sd_mha_model, mat1, mat2)
    def test_fake_sd_mha_bf16(self):
        """A pattern that should NOT hit the SD flash-MHA kernel: this
        parameter-free model must fall back to the generic fused
        ipex::mha_scores_calc node instead."""
        # Inputs are shifted by +/-20 so the score magnitudes are non-trivial.
        mat1 = (torch.randn(1, 2, 64, 64) + 20).to(torch.bfloat16)
        mat2 = (torch.randn(1, 2, 64, 64) - 20).to(torch.bfloat16)
        mat3 = torch.randn(1, 2, 64, 64).to(torch.bfloat16)
        mask = (torch.ones(1, 1, 1, 64)).to(torch.bfloat16)
        fake_sd_mha_model = Fake_SD_MHA_Model(64, -1).eval()
        fake_mha_ipex = ipex.optimize(
            fake_sd_mha_model, dtype=torch.bfloat16, level="O1"
        )
        with torch.cpu.amp.autocast(), torch.no_grad():
            fake_mha_ipex = torch.jit.trace(
                fake_mha_ipex,
                (
                    mat1,
                    mat2,
                    mat3,
                    mask,
                ),
            )
            fake_mha_ipex = torch.jit.freeze(fake_mha_ipex)
            # Warm up so the fused graph is materialized before checking it.
            for _ in range(2):
                fake_mha_jit = fake_mha_ipex(mat1, mat2, mat3, mask)
            fake_mha_ref = fake_sd_mha_model(mat1, mat2, mat3, mask)
            self.assertEqual(fake_mha_ref, fake_mha_jit, prec=1e-1)
            fake_mha_graph = fake_mha_ipex.graph_for(mat1, mat2, mat3, mask)
            self.assertTrue(
                any(n.kind() == "ipex::mha_scores_calc" for n in fake_mha_graph.nodes())
            )
    def test_transfree_mha_bf16(self):
        """bf16 fusion checks across the (bs, seq, scales, num_heads,
        head_dims) sweep: BERT-shaped MHA must fuse to ipex::bert_flash_mha,
        ViT-shaped MHA to ipex::transfree_vit_mha, and the Distil variant
        (for both -inf and FLT_MIN padding) to ipex::distil_mha_scores_calc."""
        for i in range(len(bs)):
            mat = torch.randn(bs[i], seq[i], num_heads[i] * head_dims[i]).to(
                torch.bfloat16
            )
            mask_base = torch.randn(bs[i], 1, 1, seq[i]).to(torch.bfloat16)
            mask_distil = torch.randn(bs[i], seq[i]).to(torch.bfloat16)
            mha_model = MHA_Model_BERT(
                scales[i], num_heads[i], head_dims[i], [0, 2, 1, 3], -1, -2
            ).eval()
            mha_ipex = ipex.optimize(mha_model, dtype=torch.bfloat16, level="O1")
            vit_mha_model = MHA_Model_ViT(
                scales[i], num_heads[i], head_dims[i], [2, 0, 3, 1, 4], -2, -1, 1, 2
            ).eval()
            vit_mha_ipex = ipex.optimize(
                vit_mha_model, dtype=torch.bfloat16, level="O1"
            )
            with torch.cpu.amp.autocast(), torch.no_grad():
                mha_ipex = torch.jit.trace(
                    mha_ipex,
                    (
                        mat,
                        mask_base,
                    ),
                )
                mha_ipex = torch.jit.freeze(mha_ipex)
                vit_mha_ipex = torch.jit.trace(vit_mha_ipex, (mat,))
                vit_mha_ipex = torch.jit.freeze(vit_mha_ipex)
                # Warm up twice so profiling-based fusion has taken effect.
                for _ in range(2):
                    mha_jit = mha_ipex(mat, mask_base)
                    vit_mha_jit = vit_mha_ipex(mat)
                mha_ref = mha_model(mat, mask_base)
                vit_mha_ref = vit_mha_model(mat)
                self.assertEqual(mha_ref, mha_jit, prec=1e-2)
                self.assertEqual(vit_mha_ref, vit_mha_jit, prec=1e-2)
                mha_graph = mha_ipex.graph_for(mat, mask_base)
                vit_mha_graph = vit_mha_ipex.graph_for(mat)
                self.assertTrue(
                    any(n.kind() == "ipex::bert_flash_mha" for n in mha_graph.nodes())
                )
                self.assertTrue(
                    any(
                        n.kind() == "ipex::transfree_vit_mha"
                        for n in vit_mha_graph.nodes()
                    )
                )
            # Distil path: exercise both padding fill values.
            for fill_value in [-float("inf"), torch.tensor(torch.finfo(float).min)]:
                distil_mha_model = MHA_Model_Distil(
                    scales[i], num_heads[i], head_dims[i], 1, 2, 3, fill_value
                ).eval()
                distil_mha_ipex = ipex.optimize(
                    distil_mha_model, dtype=torch.bfloat16, level="O1"
                )
                with torch.cpu.amp.autocast(), torch.no_grad():
                    distil_mha_ipex = torch.jit.trace(
                        distil_mha_ipex,
                        (
                            mat,
                            mask_distil,
                        ),
                    )
                    distil_mha_ipex = torch.jit.freeze(distil_mha_ipex)
                    for _ in range(2):
                        distil_mha_jit = distil_mha_ipex(mat, mask_distil)
                    distil_mha_ref = distil_mha_model(mat, mask_distil)
                    self.assertEqual(distil_mha_ref, distil_mha_jit, prec=1e-2)
                    distil_mha_graph = distil_mha_ipex.graph_for(mat, mask_distil)
                    self.assertTrue(
                        any(
                            n.kind() == "ipex::distil_mha_scores_calc"
                            for n in distil_mha_graph.nodes()
                        )
                    )
    def test_fake_mha_bf16(self):
        """bf16 negative cases: models whose permute/transpose/select indices
        do NOT form a canonical MHA layout. BERT-like ones still hit the
        generic ipex::mha_scores_calc fusion, Distil-like ones hit
        ipex::distil_mha_scores_calc, and the ViT-like ones must NOT hit
        ipex::transfree_vit_mha; numerics must match eager either way."""
        mat = torch.randn(16, 16, 256).to(torch.bfloat16)
        mask_base = torch.randn(16, 1, 1, 16).to(torch.bfloat16)
        mask_distil = torch.randn(16, 16).to(torch.bfloat16)
        fake_mha_model = []
        fake_mha_ipex = []
        # Indices 0-1: BERT variants with non-canonical permutes/transposes.
        fake_mha_model.append(MHA_Model_BERT(16, 16, 16, [0, 2, 3, 1], -1, -2).eval())
        fake_mha_model.append(MHA_Model_BERT(16, 16, 16, [0, 2, 1, 3], -2, -3).eval())
        fake_mha_ipex.append(
            ipex.optimize(fake_mha_model[0], dtype=torch.bfloat16, level="O1")
        )
        fake_mha_ipex.append(
            ipex.optimize(fake_mha_model[1], dtype=torch.bfloat16, level="O1")
        )
        # Indices 2-3: Distil variants with non-canonical transposes.
        fake_mha_model.append(MHA_Model_Distil(16, 16, 16, 1, 2, 1).eval())
        fake_mha_model.append(MHA_Model_Distil(16, 16, 16, 2, 1, 3).eval())
        fake_mha_ipex.append(
            ipex.optimize(fake_mha_model[2], dtype=torch.bfloat16, level="O1")
        )
        fake_mha_ipex.append(
            ipex.optimize(fake_mha_model[3], dtype=torch.bfloat16, level="O1")
        )
        # Indices 4-6: ViT variants with non-canonical permutes/selects.
        fake_mha_model.append(
            MHA_Model_ViT(16, 16, 16, [2, 0, 1, 3, 4], -2, -1, 1, 2).eval()
        )
        fake_mha_model.append(
            MHA_Model_ViT(16, 16, 16, [2, 0, 3, 1, 4], -2, -3, 1, 2).eval()
        )
        fake_mha_model.append(
            MHA_Model_ViT(16, 16, 16, [2, 0, 3, 1, 4], -2, -1, 0, 2).eval()
        )
        fake_mha_ipex.append(
            ipex.optimize(fake_mha_model[4], dtype=torch.bfloat16, level="O1")
        )
        fake_mha_ipex.append(
            ipex.optimize(fake_mha_model[5], dtype=torch.bfloat16, level="O1")
        )
        fake_mha_ipex.append(
            ipex.optimize(fake_mha_model[6], dtype=torch.bfloat16, level="O1")
        )
        with torch.cpu.amp.autocast(), torch.no_grad():
            fake_mha_jit = []
            fake_mha_ref = []
            for i in range(0, 2):
                fake_mha_ipex[i] = torch.jit.trace(
                    fake_mha_ipex[i],
                    (
                        mat,
                        mask_base,
                    ),
                )
                fake_mha_ipex[i] = torch.jit.freeze(fake_mha_ipex[i])
                for _ in range(2):
                    fake_mha_ipex[i](mat, mask_base)
                fake_mha_jit.append(fake_mha_ipex[i](mat, mask_base))
                fake_mha_ref.append(fake_mha_model[i](mat, mask_base))
                fake_mha_graph = fake_mha_ipex[i].graph_for(mat, mask_base)
                self.assertTrue(
                    any(
                        n.kind() == "ipex::mha_scores_calc"
                        for n in fake_mha_graph.nodes()
                    )
                )
            for i in range(2, 4):
                fake_mha_ipex[i] = torch.jit.trace(
                    fake_mha_ipex[i],
                    (
                        mat,
                        mask_distil,
                    ),
                )
                fake_mha_ipex[i] = torch.jit.freeze(fake_mha_ipex[i])
                for _ in range(2):
                    fake_mha_ipex[i](mat, mask_distil)
                fake_mha_jit.append(fake_mha_ipex[i](mat, mask_distil))
                fake_mha_ref.append(fake_mha_model[i](mat, mask_distil))
                fake_mha_graph = fake_mha_ipex[i].graph_for(mat, mask_distil)
                self.assertTrue(
                    any(
                        n.kind() == "ipex::distil_mha_scores_calc"
                        for n in fake_mha_graph.nodes()
                    )
                )
            for i in range(4, 7):
                fake_mha_ipex[i] = torch.jit.trace(fake_mha_ipex[i], mat)
                fake_mha_ipex[i] = torch.jit.freeze(fake_mha_ipex[i])
                for _ in range(2):
                    fake_mha_ipex[i](mat)
                fake_mha_jit.append(fake_mha_ipex[i](mat))
                fake_mha_ref.append(fake_mha_model[i](mat))
                fake_mha_graph = fake_mha_ipex[i].graph_for(mat)
                self.assertFalse(
                    any(
                        n.kind() == "ipex::transfree_vit_mha"
                        for n in fake_mha_graph.nodes()
                    )
                )
            # Whatever got fused (or not), jit output must still match eager.
            for i in range(7):
                self.assertEqual(fake_mha_ref[i], fake_mha_jit[i], prec=1e-2)
    def test_transfree_mha_fp32(self):
        """fp32 counterpart of the bf16 sweep: in fp32 the flash kernels are
        not used, so all three model families are expected to fuse to the
        ipex::matmul_outtrans pattern instead; numerics vs eager within 1e-5."""
        for i in range(len(bs)):
            mat = torch.randn(bs[i], seq[i], num_heads[i] * head_dims[i]).to(
                torch.float
            )
            mask_base = torch.randn(bs[i], 1, 1, seq[i]).to(torch.float)
            mask_distil = torch.randn(bs[i], seq[i]).to(torch.float)
            mha_model = MHA_Model_BERT(
                scales[i], num_heads[i], head_dims[i], [0, 2, 1, 3], -1, -2
            ).eval()
            mha_ipex = ipex.optimize(mha_model, dtype=torch.float, level="O1")
            distil_mha_model = MHA_Model_Distil(
                scales[i], num_heads[i], head_dims[i], 1, 2, 3
            ).eval()
            distil_mha_ipex = ipex.optimize(
                distil_mha_model, dtype=torch.float, level="O1"
            )
            vit_mha_model = MHA_Model_ViT(
                scales[i], num_heads[i], head_dims[i], [2, 0, 3, 1, 4], -2, -1, 1, 2
            ).eval()
            vit_mha_ipex = ipex.optimize(vit_mha_model, dtype=torch.float, level="O1")
            with torch.no_grad():
                mha_ipex = torch.jit.trace(
                    mha_ipex,
                    (
                        mat,
                        mask_base,
                    ),
                )
                mha_ipex = torch.jit.freeze(mha_ipex)
                distil_mha_ipex = torch.jit.trace(
                    distil_mha_ipex,
                    (
                        mat,
                        mask_distil,
                    ),
                )
                distil_mha_ipex = torch.jit.freeze(distil_mha_ipex)
                vit_mha_ipex = torch.jit.trace(vit_mha_ipex, (mat,))
                vit_mha_ipex = torch.jit.freeze(vit_mha_ipex)
                # Warm up twice so profiling-based fusion has taken effect.
                for _ in range(2):
                    mha_jit = mha_ipex(mat, mask_base)
                    distil_mha_jit = distil_mha_ipex(mat, mask_distil)
                    vit_mha_jit = vit_mha_ipex(mat)
                mha_ref = mha_model(mat, mask_base)
                distil_mha_ref = distil_mha_model(mat, mask_distil)
                vit_mha_ref = vit_mha_model(mat)
                self.assertEqual(mha_ref, mha_jit, prec=1e-5)
                self.assertEqual(distil_mha_ref, distil_mha_jit, prec=1e-5)
                self.assertEqual(vit_mha_ref, vit_mha_jit, prec=1e-5)
                mha_graph = mha_ipex.graph_for(mat, mask_base)
                distil_mha_graph = distil_mha_ipex.graph_for(mat, mask_distil)
                vit_mha_graph = vit_mha_ipex.graph_for(mat)
                self.assertTrue(
                    any(n.kind() == "ipex::matmul_outtrans" for n in mha_graph.nodes())
                )
                self.assertTrue(
                    any(
                        n.kind() == "ipex::matmul_outtrans"
                        for n in distil_mha_graph.nodes()
                    )
                )
                self.assertTrue(
                    any(
                        n.kind() == "ipex::matmul_outtrans"
                        for n in vit_mha_graph.nodes()
                    )
                )
    def test_fake_mha_fp32(self):
        """fp32 negative cases with the same seven non-canonical layouts as
        test_fake_mha_bf16. Besides the graph-node checks, this version also
        profiles one run per model and asserts which backing kernel
        (dil_matmul vs dil_mha_bmm) actually executed."""
        mat = torch.randn(16, 16, 256)
        mask_base = torch.randn(16, 1, 1, 16)
        mask_distil = torch.randn(16, 16)
        fake_mha_model = []
        fake_mha_ipex = []
        # Indices 0-1: BERT variants with non-canonical permutes/transposes.
        fake_mha_model.append(MHA_Model_BERT(16, 16, 16, [0, 2, 3, 1], -1, -2).eval())
        fake_mha_model.append(MHA_Model_BERT(16, 16, 16, [0, 2, 1, 3], -2, -3).eval())
        fake_mha_ipex.append(
            ipex.optimize(fake_mha_model[0], dtype=torch.float, level="O1")
        )
        fake_mha_ipex.append(
            ipex.optimize(fake_mha_model[1], dtype=torch.float, level="O1")
        )
        # Indices 2-3: Distil variants with non-canonical transposes.
        fake_mha_model.append(MHA_Model_Distil(16, 16, 16, 1, 2, 1).eval())
        fake_mha_model.append(MHA_Model_Distil(16, 16, 16, 2, 1, 3).eval())
        fake_mha_ipex.append(
            ipex.optimize(fake_mha_model[2], dtype=torch.float, level="O1")
        )
        fake_mha_ipex.append(
            ipex.optimize(fake_mha_model[3], dtype=torch.float, level="O1")
        )
        # Indices 4-6: ViT variants with non-canonical permutes/selects.
        fake_mha_model.append(
            MHA_Model_ViT(16, 16, 16, [2, 0, 1, 3, 4], -2, -1, 1, 2).eval()
        )
        fake_mha_model.append(
            MHA_Model_ViT(16, 16, 16, [2, 0, 3, 1, 4], -2, -3, 1, 2).eval()
        )
        fake_mha_model.append(
            MHA_Model_ViT(16, 16, 16, [2, 0, 3, 1, 4], -2, -1, 0, 2).eval()
        )
        fake_mha_ipex.append(
            ipex.optimize(fake_mha_model[4], dtype=torch.float, level="O1")
        )
        fake_mha_ipex.append(
            ipex.optimize(fake_mha_model[5], dtype=torch.float, level="O1")
        )
        fake_mha_ipex.append(
            ipex.optimize(fake_mha_model[6], dtype=torch.float, level="O1")
        )
        with torch.no_grad():
            fake_mha_jit = []
            fake_mha_ref = []
            for i in range(0, 2):
                fake_mha_ipex[i] = torch.jit.trace(
                    fake_mha_ipex[i],
                    (
                        mat,
                        mask_base,
                    ),
                )
                fake_mha_ipex[i] = torch.jit.freeze(fake_mha_ipex[i])
                for _ in range(2):
                    fake_mha_ipex[i](mat, mask_base)
                fake_mha_jit.append(fake_mha_ipex[i](mat, mask_base))
                fake_mha_ref.append(fake_mha_model[i](mat, mask_base))
                fake_mha_graph = fake_mha_ipex[i].graph_for(mat, mask_base)
                self.assertTrue(
                    any(
                        n.kind() == "ipex::mha_scores_calc"
                        for n in fake_mha_graph.nodes()
                    )
                )
                # Check which kernel the fused op dispatched to at runtime.
                with torch.profiler.profile(
                    activities=[torch.profiler.ProfilerActivity.CPU]
                ) as p:
                    fake_mha_ipex[i](mat, mask_base)
                if i == 0:
                    self.assertTrue("dil_matmul" in str(p.key_averages()))
                else:
                    self.assertTrue("dil_mha_bmm" in str(p.key_averages()))
            for i in range(2, 4):
                fake_mha_ipex[i] = torch.jit.trace(
                    fake_mha_ipex[i],
                    (
                        mat,
                        mask_distil,
                    ),
                )
                fake_mha_ipex[i] = torch.jit.freeze(fake_mha_ipex[i])
                for _ in range(2):
                    fake_mha_ipex[i](mat, mask_distil)
                fake_mha_jit.append(fake_mha_ipex[i](mat, mask_distil))
                fake_mha_ref.append(fake_mha_model[i](mat, mask_distil))
                fake_mha_graph = fake_mha_ipex[i].graph_for(mat, mask_distil)
                self.assertTrue(
                    any(
                        n.kind() == "ipex::distil_mha_scores_calc"
                        for n in fake_mha_graph.nodes()
                    )
                )
                with torch.profiler.profile(
                    activities=[torch.profiler.ProfilerActivity.CPU]
                ) as p:
                    fake_mha_ipex[i](mat, mask_distil)
                if i == 2:
                    self.assertTrue("dil_mha_bmm" in str(p.key_averages()))
                else:
                    self.assertTrue("dil_matmul" in str(p.key_averages()))
            for i in range(4, 7):
                fake_mha_ipex[i] = torch.jit.trace(fake_mha_ipex[i], mat)
                fake_mha_ipex[i] = torch.jit.freeze(fake_mha_ipex[i])
                for _ in range(2):
                    fake_mha_ipex[i](mat)
                fake_mha_jit.append(fake_mha_ipex[i](mat))
                fake_mha_ref.append(fake_mha_model[i](mat))
                fake_mha_graph = fake_mha_ipex[i].graph_for(mat)
                self.assertTrue(
                    any(n.kind() == "ipex::matmul_mul" for n in fake_mha_graph.nodes())
                )
                with torch.profiler.profile(
                    activities=[torch.profiler.ProfilerActivity.CPU]
                ) as p:
                    fake_mha_ipex[i](mat)
                if i == 6:
                    self.assertTrue("dil_matmul" in str(p.key_averages()))
                else:
                    self.assertTrue("dil_mha_bmm" in str(p.key_averages()))
            # Whatever got fused (or not), jit output must still match eager.
            for i in range(7):
                self.assertEqual(fake_mha_ref[i], fake_mha_jit[i], prec=1e-5)
if __name__ == "__main__":
test = unittest.main()
| 36,131 | 37.561366 | 99 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/itensor_size1_test.py | import torch
import intel_extension_for_pytorch as ipex
# This script is called and tested by test_conv_reorder.py, and its purpose is:
# (1) This script tests the case of a conv grad tensor with shape [n, 1, h, w] and stride
# [h*w, 1, w, 1], where the stride can be considered both default contiguous and
# channels-last by PyTorch.
# (2) The main source of confusion in this case is that, because the tensor has a size-1
# dimension, that dimension's stride is ignored by PyTorch (it is meaningless for size 1).
# But for shape [n, 1, h, w], stride [h*w, h*w, w, 1] is strictly default contiguous and
# [h*w, 1, w, 1] is strictly channels-last.
# (3) We choose to keep the strictly channels-last stride in such a case (calling into
# oneDNN), since channels-last has priority.
# (4) Therefore we do not expect any "plain format <-> channels-last" reorder on the conv
# op's src/dst (fwd and bwd). There should be only 3 reorders in this script, all of
# which are for the weight format.
# Conv with a single output channel so the grad tensors have a size-1 dim (see note above).
m = torch.nn.Conv2d(2, 1, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
m = m.to(memory_format=torch.channels_last)
m.train()
# Channels-last input that requires grad so the backward pass runs through oneDNN.
x = torch.randn(8, 2, 224, 224).to(memory_format=torch.channels_last).requires_grad_()
origin_optimizer = torch.optim.SGD(m.parameters(), lr=0.01, momentum=0.9)
example_input = torch.randn(8, 2, 224, 224)
# Let ipex.optimize pre-pack weights using the sample input's shape.
ipex_model, ipex_optimizer = ipex.optimize(
    m,
    dtype=torch.float,
    optimizer=origin_optimizer,
    level="O1",
    sample_input=example_input,
)
# One full train step (fwd + bwd + update); the caller counts the reorders it triggers.
y = ipex_model(x).sum()
ipex_optimizer.zero_grad()
y.backward()
ipex_optimizer.step()
| 1,528 | 48.322581 | 105 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_runtime_api.py | import unittest
import torch
import intel_extension_for_pytorch as ipex
from common_utils import TestCase
from common_ipex_conf import runtime_thread_affinity_test_env
import subprocess
import os
class SimpleNet(torch.nn.Module):
    """Minimal workload: one strided conv followed by a flatten."""

    def __init__(self):
        super(SimpleNet, self).__init__()
        # 64 -> 128 channels, 3x3 kernel, stride 2, padding 1, no bias.
        self.conv = torch.nn.Conv2d(
            64, 128, (3, 3), stride=(2, 2), padding=(1, 1), bias=False
        )

    def forward(self, x):
        # Convolve, then collapse every non-batch dim into one.
        return torch.flatten(self.conv(x), start_dim=1)
class SimpleNet_v2(torch.nn.Module):
    """Two stacked strided convs followed by a flatten."""

    def __init__(self):
        super(SimpleNet_v2, self).__init__()
        # 3 -> 64 channels, downsampling by 2.
        self.conv = torch.nn.Conv2d(
            3, 64, (3, 3), stride=(2, 2), padding=(1, 1), bias=False
        )
        # 64 -> 64 channels, downsampling by 2 again.
        self.conv2 = torch.nn.Conv2d(
            64, 64, (3, 3), stride=(2, 2), padding=(1, 1), bias=False
        )

    def forward(self, x):
        hidden = self.conv(x)
        hidden = self.conv2(hidden)
        return torch.flatten(hidden, start_dim=1)
class SimpleNet_dict(torch.nn.Module):
    """Workload taking keyword tensors ("x1", "x2") and returning a dict output."""

    def __init__(self):
        super(SimpleNet_dict, self).__init__()
        self.conv = torch.nn.Conv2d(
            64, 128, (3, 3), stride=(2, 2), padding=(1, 1), bias=False
        )

    def forward(self, **x_dict):
        # Same conv applied to both inputs, then summed.
        summed = self.conv(x_dict["x1"]) + self.conv(x_dict["x2"])
        flat = torch.flatten(summed, start_dim=1)
        # "y1" is the 4-D sum, "y2" its flattened view.
        return {"y1": summed, "y2": flat}
class SimpleNet_tensor_dict(torch.nn.Module):
    """Like SimpleNet_dict, but returns a (Tensor, dict) tuple."""

    def __init__(self):
        super(SimpleNet_tensor_dict, self).__init__()
        self.conv = torch.nn.Conv2d(
            64, 128, (3, 3), stride=(2, 2), padding=(1, 1), bias=False
        )

    def forward(self, **x_dict):
        summed = self.conv(x_dict["x1"]) + self.conv(x_dict["x2"])
        flat = torch.flatten(summed, start_dim=1)
        out_dict = {"y1": summed, "y2": flat}
        # Return a tuple of (Tensor, dict)
        return summed, out_dict
class TestInputOutputModule(torch.nn.Module):
    """Echo module: returns its positional args; keyword args are accepted but ignored."""

    def __init__(self):
        super(TestInputOutputModule, self).__init__()

    def forward(self, *inputs, **ignored):
        return inputs
class TestInputOutputModule2(torch.nn.Module):
    """Identity module: returns its single argument unchanged."""

    def __init__(self):
        super(TestInputOutputModule2, self).__init__()

    def forward(self, param1):
        # Pass the object straight through (same identity, not a copy).
        result = param1
        return result
class TestCPUPool(TestCase):
    """Sanity check for the runtime-extension CPUPool wrapper."""

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    def test_cpupool_get_core_list(self):
        """A CPUPool built from an explicit core list must report that same list back."""
        core_list = [0, 1]
        cpu_pool = ipex.cpu.runtime.CPUPool(core_list)
        self.assertEqual(cpu_pool.cpu_pool.get_core_list(), core_list)
class TestCoreBinding(TestCase):
    """Pinning computation to a CPUPool must not change numerical results."""

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_decorator_imperative_model(self):
        """Result under the @ipex.cpu.runtime.pin decorator matches the unpinned run."""
        model = SimpleNet()
        model.eval()
        x = torch.rand(64, 64, 3, 3)
        cpu_pool = ipex.cpu.runtime.CPUPool([1, 2, 3, 4])

        @ipex.cpu.runtime.pin(cpu_pool)
        def test(model, x):
            return model(x)

        y_runtime = test(model, x)
        y = model(x)
        self.assertEqual(y, y_runtime)

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_with_context_imperative_model(self):
        """Result inside a `with ipex.cpu.runtime.pin(...)` context matches the unpinned run."""
        model = SimpleNet()
        model.eval()
        x = torch.rand(64, 64, 3, 3)
        cpu_pool = ipex.cpu.runtime.CPUPool([1, 2, 3, 4])
        with ipex.cpu.runtime.pin(cpu_pool):
            y_runtime = model(x)
        y = model(x)
        self.assertEqual(y, y_runtime)

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_nested_with_context_imperative_model(self):
        """Nested pin contexts: inner pool applies inside, outer pool restores after exit."""
        model = torch.nn.Softmax(dim=-1)
        model.eval()
        x = torch.rand(100, 8276)
        cpu_pool = ipex.cpu.runtime.CPUPool([1, 2])
        cpu_pool2 = ipex.cpu.runtime.CPUPool([3, 4])
        with ipex.cpu.runtime.pin(cpu_pool):
            y_runtime = model(x)
            with ipex.cpu.runtime.pin(cpu_pool2):
                y_runtime = model(x)
            y_runtime = model(x)
        y = model(x)
        self.assertEqual(y, y_runtime)
class TestRuntimeAPI(TestCase):
    """Async/sync Task submission via the runtime extension matches direct execution."""

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_task_async_api_imperative_model(self):
        """Async submit of an eager-mode model: future result equals direct call."""
        model = SimpleNet()
        model.eval()
        x = torch.rand(64, 64, 3, 3)
        # Calculate the reference result
        y = model(x)
        # Create task
        cpu_pool = ipex.cpu.runtime.CPUPool(node_id=0)
        task = ipex.cpu.runtime.Task(model, cpu_pool)
        # Task submit and wait
        y_runtime_future = task(x)
        y_runtime = y_runtime_future.get()
        self.assertEqual(y, y_runtime)

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_task_sync_api_imperative_model(self):
        """Synchronous run_sync of an eager-mode model equals direct call."""
        model = SimpleNet()
        model.eval()
        x = torch.rand(64, 64, 3, 3)
        # Calculate the reference result
        y = model(x)
        # Create task
        cpu_pool = ipex.cpu.runtime.CPUPool(node_id=0)
        task = ipex.cpu.runtime.Task(model, cpu_pool)
        # Task sync submit
        y_runtime = task.run_sync(x)
        self.assertEqual(y, y_runtime)

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_task_async_api_native_function(self):
        """A plain Python function (not a Module) can also be wrapped in a Task."""
        model = SimpleNet()
        model.eval()
        x = torch.rand(64, 64, 3, 3)

        def test(model, x):
            return model(x)

        # Calculate the reference result
        y = test(model, x)
        # Create task
        cpu_pool = ipex.cpu.runtime.CPUPool(node_id=0)
        task = ipex.cpu.runtime.Task(test, cpu_pool)
        # Task submit and wait
        y_runtime_future = task(model, x)
        y_runtime = y_runtime_future.get()
        self.assertEqual(y, y_runtime)

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_task_copy(self):
        """Two references to the same Task object both produce correct results."""
        model = SimpleNet()
        model.eval()
        x = torch.rand(64, 64, 3, 3)
        # Calculate the reference result
        y = model(x)
        # Create task
        cpu_pool = ipex.cpu.runtime.CPUPool(node_id=0)
        task = ipex.cpu.runtime.Task(model, cpu_pool)
        # Copy task
        task2 = task
        # Task submit and wait
        y_runtime_future = task(x)
        y_runtime = y_runtime_future.get()
        y_runtime_future2 = task2(x)
        y_runtime2 = y_runtime_future2.get()
        self.assertEqual(y, y_runtime)
        self.assertEqual(y, y_runtime2)
class TestMultiStreamModule(TestCase):
    """MultiStreamModule splits the batch across streams; outputs must match single-stream runs."""

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_multi_stream_module(self):
        """Two streams over node 0 with a batch of node-core-count samples."""
        model = SimpleNet()
        model.eval()
        batch_size = ipex.cpu.runtime.get_core_list_of_node_id(0).__len__()
        x = torch.rand(batch_size, 64, 3, 3)
        # Calculate the reference result
        y = model(x)
        # Create MultiStreamModule
        cpu_pool = ipex.cpu.runtime.CPUPool(node_id=0)
        multi_stream_model = ipex.cpu.runtime.MultiStreamModule(
            model, num_streams=2, cpu_pool=cpu_pool
        )
        y_runtime = multi_stream_model(x)
        self.assertEqual(y, y_runtime)

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_multi_stream_module_with_dict_return_type(self):
        """Dict inputs/outputs split and re-concatenated via MultiStreamModuleHint."""
        model = SimpleNet_dict()
        model.eval()
        batch_size = ipex.cpu.runtime.get_core_list_of_node_id(0).__len__()
        x1 = torch.rand(batch_size, 64, 3, 3)
        x2 = torch.rand(batch_size, 64, 3, 3)
        x_dict = {"x1": x1, "x2": x2}
        # Calculate the reference result
        y_dict = model(**x_dict)
        # Create MultiStreamModule
        cpu_pool = ipex.cpu.runtime.CPUPool(node_id=0)
        # 0 = split/concat along dim 0 (the batch dim) for each dict entry.
        input_hint_object = {"x1": 0, "x2": 0}
        multi_stream_input_hint = ipex.cpu.runtime.MultiStreamModuleHint(
            **input_hint_object
        )
        output_concat_object = {"y1": 0, "y2": 0}
        multi_stream_output_hint = ipex.cpu.runtime.MultiStreamModuleHint(
            **output_concat_object
        )
        multi_stream_model = ipex.cpu.runtime.MultiStreamModule(
            model,
            num_streams=2,
            cpu_pool=cpu_pool,
            input_split_hint=multi_stream_input_hint,
            output_concat_hint=multi_stream_output_hint,
        )
        y_runtime_dict = multi_stream_model(**x_dict)
        self.assertEqual(y_dict["y1"], y_runtime_dict["y1"])
        self.assertEqual(y_dict["y2"], y_runtime_dict["y2"])

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_multi_stream_module_with_tensor_and_dict_return_type(self):
        """(Tensor, dict) return type concatenated via a nested output hint."""
        model = SimpleNet_tensor_dict()
        model.eval()
        batch_size = ipex.cpu.runtime.get_core_list_of_node_id(0).__len__()
        x1 = torch.rand(batch_size, 64, 3, 3)
        x2 = torch.rand(batch_size, 64, 3, 3)
        x_dict = {"x1": x1, "x2": x2}
        # Calculate the reference result
        y, y_dict = model(**x_dict)
        # Create MultiStreamModule
        cpu_pool = ipex.cpu.runtime.CPUPool(node_id=0)
        input_hint_object = {"x1": 0, "x2": 0}
        multi_stream_input_hint = ipex.cpu.runtime.MultiStreamModuleHint(
            **input_hint_object
        )
        output_concat_object = (0, {"y1": 0, "y2": 0})
        multi_stream_output_hint = ipex.cpu.runtime.MultiStreamModuleHint(
            output_concat_object
        )
        multi_stream_model = ipex.cpu.runtime.MultiStreamModule(
            model,
            num_streams=2,
            cpu_pool=cpu_pool,
            input_split_hint=multi_stream_input_hint,
            output_concat_hint=multi_stream_output_hint,
        )
        y_runtime, y_runtime_dict = multi_stream_model(**x_dict)
        self.assertEqual(y, y_runtime)
        self.assertEqual(y_dict["y1"], y_runtime_dict["y1"])
        self.assertEqual(y_dict["y2"], y_runtime_dict["y2"])

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_single_stream_module(self):
        """num_streams=1 degenerates to one chunk; concat_output=False wraps it in a list."""
        model = SimpleNet()
        model.eval()
        batch_size = ipex.cpu.runtime.get_core_list_of_node_id(0).__len__()
        x = torch.rand(batch_size, 64, 3, 3)
        # Calculate the reference result
        y = model(x)
        # Create MultiStreamModule
        cpu_pool = ipex.cpu.runtime.CPUPool(node_id=0)
        multi_stream_model = ipex.cpu.runtime.MultiStreamModule(
            model, num_streams=1, cpu_pool=cpu_pool
        )
        multi_stream_model2 = ipex.cpu.runtime.MultiStreamModule(
            model, num_streams=1, cpu_pool=cpu_pool, concat_output=False
        )
        y_runtime = multi_stream_model(x)
        y_runtime2 = multi_stream_model2(x)
        self.assertEqual(y, y_runtime)
        self.assertEqual(y, y_runtime2[0])

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_core_number_not_divisible_by_stream_number(self):
        """3 cores over 2 streams still produces the reference result."""
        model = SimpleNet()
        model.eval()
        num_streams = 2
        batch_size = num_streams
        x = torch.rand(batch_size, 64, 3, 3)
        # Calculate the reference result
        y = model(x)
        # Create MultiStreamModule
        # Core Number is 3, stream Number is 2
        cpu_pool = ipex.cpu.runtime.CPUPool(core_ids=[0, 1, 2])
        multi_stream_model = ipex.cpu.runtime.MultiStreamModule(
            model, num_streams=num_streams, cpu_pool=cpu_pool
        )
        multi_stream_model2 = ipex.cpu.runtime.MultiStreamModule(
            model, num_streams=num_streams, cpu_pool=cpu_pool, concat_output=False
        )
        y_runtime = multi_stream_model(x)
        y_runtime2 = multi_stream_model2(x)
        self.assertEqual(y, y_runtime)
        self.assertEqual(y, torch.cat(y_runtime2))

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_batchsize_less_than_stream_number(self):
        """Batch of 2 over 3 streams: some streams get no work, result unchanged."""
        model = SimpleNet()
        model.eval()
        num_streams = 3
        batch_size = 2
        x = torch.rand(batch_size, 64, 3, 3)
        # Calculate the reference result
        y = model(x)
        # Create MultiStreamModule
        # Batchsize 2, Core Number is 3, stream Number is 3
        cpu_pool = ipex.cpu.runtime.CPUPool(core_ids=[0, 1, 2])
        multi_stream_model = ipex.cpu.runtime.MultiStreamModule(
            model, num_streams=num_streams, cpu_pool=cpu_pool
        )
        multi_stream_model2 = ipex.cpu.runtime.MultiStreamModule(
            model, num_streams=num_streams, cpu_pool=cpu_pool, concat_output=False
        )
        y_runtime = multi_stream_model(x)
        y_runtime2 = multi_stream_model2(x)
        self.assertEqual(y, y_runtime)
        self.assertEqual(y, torch.cat(y_runtime2))

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_batchsize_not_divisible_by_stream_number(self):
        """Batch of 4 over 3 streams splits as 2/1/1 (first stream gets the remainder)."""
        model = SimpleNet()
        model.eval()
        num_streams = 3
        batch_size = 4
        x = torch.rand(batch_size, 64, 3, 3)
        # Calculate the reference result
        y = model(x)
        # Create MultiStreamModule
        # Batchsize 4, Core Number is 3, stream Number is 3
        cpu_pool = ipex.cpu.runtime.CPUPool(core_ids=[0, 1, 2])
        multi_stream_model = ipex.cpu.runtime.MultiStreamModule(
            model, num_streams=num_streams, cpu_pool=cpu_pool
        )
        multi_stream_model2 = ipex.cpu.runtime.MultiStreamModule(
            model, num_streams=num_streams, cpu_pool=cpu_pool, concat_output=False
        )
        y_runtime = multi_stream_model(x)
        y_runtime2 = multi_stream_model2(x)
        self.assertEqual(y, y_runtime)
        self.assertEqual(y, torch.cat(y_runtime2))
        self.assertEqual(y_runtime2[0].size(0), 2)
        self.assertEqual(y_runtime2[1].size(0), 1)
        self.assertEqual(y_runtime2[2].size(0), 1)
class TestModuleMultiStreamModuleHint(TestCase):
    """Split/concat hints for input/output structures that jit.trace cannot handle."""

    # For the inputs format which can't be jit.trace
    def init_set_up(self):
        """Common setup: one stream (and one batch element) per core of the default pool."""
        # Create Multi Stream Module without concat output
        cpu_pool = ipex.cpu.runtime.CPUPool()
        batch_size = cpu_pool.core_ids.__len__()
        num_streams = cpu_pool.core_ids.__len__()
        return batch_size, num_streams, cpu_pool

    def create_multi_stream_module(
        self,
        traced_model,
        num_streams,
        cpu_pool,
        multi_stream_input_hint,
        multi_stream_output_hint=None,
        concat_output=True,
    ):
        """Build a MultiStreamModule; output hint is only passed when concat_output is True."""
        if not concat_output:
            return ipex.cpu.runtime.MultiStreamModule(
                traced_model,
                num_streams=num_streams,
                cpu_pool=cpu_pool,
                concat_output=False,
                input_split_hint=multi_stream_input_hint,
            )
        else:
            return ipex.cpu.runtime.MultiStreamModule(
                traced_model,
                num_streams=num_streams,
                cpu_pool=cpu_pool,
                input_split_hint=multi_stream_input_hint,
                output_concat_hint=multi_stream_output_hint,
            )

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_mix_tensor_bool_input_output_hint(self):
        """Hint value None marks non-splittable args (the bool) that are broadcast as-is."""
        # This module:
        #   * Accept 2 tensors + 1 scalar as input
        #   * Return 2 tensors + 1 scalar as output
        # Since Type 'Tuple[Tensor, bool, Tensor]' cannot be traced, we put this test input type in imperative mode.
        model = TestInputOutputModule().eval()
        batch_size, num_streams, cpu_pool = self.init_set_up()
        input_tensor1 = torch.rand(batch_size, 1)
        input_tensor2 = torch.rand(batch_size, 3)
        # Calculate the reference result
        y_ref = model(input_tensor1, False, input_tensor2)
        multi_stream_input_hint = ipex.cpu.runtime.MultiStreamModuleHint(0, None, 0)
        multi_stream_output_hint = ipex.cpu.runtime.MultiStreamModuleHint((0, None, 0))
        multi_stream_model = self.create_multi_stream_module(
            model,
            num_streams,
            cpu_pool,
            multi_stream_input_hint,
            multi_stream_output_hint,
            concat_output=True,
        )
        y_runtime_res = multi_stream_model(input_tensor1, False, input_tensor2)
        self.assertEqual(y_ref, y_runtime_res)

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_tuple_input_output_hint(self):
        """A tuple of tensors is split element-wise along dim 0 and re-concatenated."""
        # This module:
        #   * Accept 1 tuple(3 tensors) as input
        #   * Return 1 tuple(3 tensors) as output
        model = TestInputOutputModule2().eval()
        batch_size, num_streams, cpu_pool = self.init_set_up()
        input_tensor1 = torch.rand(batch_size, 1)
        input_tensor2 = torch.rand(batch_size, 2)
        input_tensor3 = torch.rand(batch_size, 3)
        input = (input_tensor1, input_tensor2, input_tensor3)
        y_ref = model(input)
        multi_stream_input_hint = ipex.cpu.runtime.MultiStreamModuleHint((0, 0, 0))
        multi_stream_output_hint = ipex.cpu.runtime.MultiStreamModuleHint((0, 0, 0))
        multi_stream_model = self.create_multi_stream_module(
            model,
            num_streams,
            cpu_pool,
            multi_stream_input_hint,
            multi_stream_output_hint,
            concat_output=True,
        )
        y_runtime_res = multi_stream_model(input)
        self.assertEqual(y_ref, y_runtime_res)

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_dict_input_output_hint(self):
        """A dict of tensors is split per-key along dim 0 and re-concatenated per-key."""
        # This module:
        #   * Accept 1 dict(3 tensors) as input
        #   * Return 1 dict(3 tensors) as output
        model = TestInputOutputModule2().eval()
        batch_size, num_streams, cpu_pool = self.init_set_up()
        input_tensor1 = torch.rand(batch_size, 1)
        input_tensor2 = torch.rand(batch_size, 2)
        input_tensor3 = torch.rand(batch_size, 3)
        input = {"key1": input_tensor1, "key2": input_tensor2, "key3": input_tensor3}
        y_ref = model(input)
        multi_stream_input_hint = ipex.cpu.runtime.MultiStreamModuleHint(
            {"key1": 0, "key2": 0, "key3": 0}
        )
        multi_stream_output_hint = ipex.cpu.runtime.MultiStreamModuleHint(
            {"key1": 0, "key2": 0, "key3": 0}
        )
        multi_stream_model = self.create_multi_stream_module(
            model,
            num_streams,
            cpu_pool,
            multi_stream_input_hint,
            multi_stream_output_hint,
            concat_output=True,
        )
        y_runtime_res = multi_stream_model(input)
        self.assertEqual(y_ref, y_runtime_res)

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_nested_tuple_input_output_hint(self):
        """Hints may nest: ((tensor, tensor), tensor) structures round-trip correctly."""
        # This module:
        #   * Accept nested tuple ((tensor1, tensor2), tensor3) as input
        #   * Return nested tuple ((tensor1, tensor2), tensor3) as output
        model = TestInputOutputModule2().eval()
        batch_size, num_streams, cpu_pool = self.init_set_up()
        input_tensor1 = torch.rand(batch_size, 1)
        input_tensor2 = torch.rand(batch_size, 2)
        input_tensor3 = torch.rand(batch_size, 3)
        input = ((input_tensor1, input_tensor2), input_tensor3)
        y_ref = model(input)
        multi_stream_input_hint = ipex.cpu.runtime.MultiStreamModuleHint(((0, 0), 0))
        multi_stream_output_hint = ipex.cpu.runtime.MultiStreamModuleHint(((0, 0), 0))
        multi_stream_model = self.create_multi_stream_module(
            model,
            num_streams,
            cpu_pool,
            multi_stream_input_hint,
            multi_stream_output_hint,
            concat_output=True,
        )
        y_runtime_res = multi_stream_model(input)
        self.assertEqual(y_ref, y_runtime_res)
def is_numactl_available():
    """Return True if the ``numactl`` binary can successfully run on this host.

    Probes by executing a trivial command (``ls``) pinned to core 0 / node 0.
    Fixes over the previous version: the probe's stdout/stderr are suppressed
    so it no longer pollutes test logs, and only OS-level launch failures are
    caught (the old ``except BaseException`` also swallowed KeyboardInterrupt).
    """
    cmd = ["numactl", "-C", "0", "-m", "0", "ls"]
    try:
        r = subprocess.run(
            cmd,
            env=os.environ,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
    except OSError:
        # numactl binary missing or not executable.
        return False
    return r.returncode == 0
class TestRuntimeExtensionWithNumactl(TestCase):
    """CPUPool creation must respect an affinity mask already set by numactl."""

    @unittest.skipIf(
        not (is_numactl_available() and ipex.cpu.runtime.is_runtime_ext_enabled()),
        "Skip when numactl is not available",
    )
    @runtime_thread_affinity_test_env
    def test_cpupool_creation_with_numactl(self):
        """runtime.py under numactl -C 0-1 must report core [1] for its CPUPool."""
        loc = os.path.dirname(os.path.abspath(__file__))
        cmd1 = "numactl -C 0-1 -m 0 python -u {}/runtime.py --case-name={}".format(
            loc, "create_cpu_pool"
        )
        cmd2 = "OMP_NUM_THREADS=1 KMP_AFFINITY=granularity=fine,compact,1,0 numactl -C 0-1 -m 0 \
python -u {}/runtime.py --case-name={}".format(
            loc, "create_cpu_pool"
        )
        cmds = [cmd1, cmd2]
        for cmd in cmds:
            match = False
            # Scan the child's merged stdout/stderr for the CPUPool report line.
            with subprocess.Popen(
                cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
            ) as p:
                for line in p.stdout.readlines():
                    line = str(line, "utf-8").strip()
                    if "The created CPUPool has core is:" in line:
                        x = line.split(":")
                        assert (
                            "[1]" in x[1]
                        ), "The core ids in test_cpupool_creation with numactl is not as expected."
                        match = True
            assert match, "Test Case Failed to create CPUPool"
if __name__ == "__main__":
    # Run the tests when executed as a script.  The previous binding
    # `test = unittest.main()` was dead: unittest.main() raises SystemExit
    # by default, so the assignment was never used.
    unittest.main()
| 23,649 | 33.275362 | 116 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_dyndisp.py | import unittest
import os
import subprocess
import intel_extension_for_pytorch._C as core
# All ISA level names the dynamic dispatcher may legally report, listed from
# weakest ("default") to strongest ("amx").
supported_isa_set = [
    "default",
    "avx2",
    "avx2_vnni",
    "avx512",
    "avx512_vnni",
    "avx512_bf16",
    "amx",
]
def get_isa_val(isa_name):
    """Map an ISA level name to its numeric rank for comparisons.

    Higher rank means a newer/wider instruction set.  Unknown names map
    to 100 so they compare as "higher than anything supported", matching
    the original if/elif ladder's fall-through behavior.
    """
    isa_ranks = {
        "default": 0,
        "avx2": 1,
        "avx2_vnni": 2,
        "avx512": 3,
        "avx512_vnni": 4,
        "avx512_bf16": 5,
        "amx": 6,
    }
    return isa_ranks.get(isa_name, 100)
def get_ipex_isa_env_setting():
    """Return the ATEN_CPU_CAPABILITY override from the environment, or None when unset."""
    return os.environ.get("ATEN_CPU_CAPABILITY")
def get_currnet_isa_level():
    """Return the ISA level IPEX is currently dispatching to, lowercased.

    NOTE(review): the misspelled name ("currnet") is kept deliberately
    because callers in this file depend on it.
    """
    level = core._get_current_isa_level()
    return level.lower()
def get_highest_binary_support_isa_level():
    """Return the highest ISA level the installed IPEX binary supports, lowercased."""
    level = core._get_highest_binary_support_isa_level()
    return level.lower()
def get_highest_cpu_support_isa_level():
    """Return the highest ISA level the running CPU supports, lowercased."""
    level = core._get_highest_cpu_support_isa_level()
    return level.lower()
def check_not_sync_onednn_isa_level():
    """Report whether this build does NOT propagate its ISA level to oneDNN."""
    not_synced = core._check_not_sync_onednn_isa_level()
    return not_synced
class TestDynDisp(unittest.TestCase):
    """Dynamic-dispatch checks: selected ISA level must respect binary/CPU/env limits."""

    def test_manual_select_kernel(self):
        """The active ISA never exceeds min(binary support, CPU support, env override)."""
        env_isa = get_ipex_isa_env_setting()
        cur_isa = get_currnet_isa_level()
        max_bin_isa = get_highest_binary_support_isa_level()
        max_cpu_isa = get_highest_cpu_support_isa_level()
        expected_isa_val = min(get_isa_val(max_bin_isa), get_isa_val(max_cpu_isa))
        if env_isa is not None:
            expected_isa_val = min(get_isa_val(env_isa), expected_isa_val)
        actural_isa_val = get_isa_val(cur_isa)
        # Isa level and compiler version are not linear relationship.
        # gcc 9.4 can build avx512_vnni.
        # gcc 11.3 start to support avx2_vnni.
        self.assertTrue(actural_isa_val <= expected_isa_val)
        return

    def test_dyndisp_in_supported_set(self):
        """Without an env override, the reported ISA must be one of the known names."""
        env_isa = get_ipex_isa_env_setting()
        if env_isa is not None:
            return
        cur_isa = get_currnet_isa_level()
        expected_isa = cur_isa in supported_isa_set
        self.assertTrue(expected_isa)
        return

    @unittest.skipIf(
        check_not_sync_onednn_isa_level(), "skip this if not sync onednn isa level"
    )
    def test_ipex_set_onednn_isa_level(self):
        """Setting ATEN_CPU_CAPABILITY=avx2 in a child process caps oneDNN at AVX2."""
        command = 'ATEN_CPU_CAPABILITY=avx2 python -c "import torch; import intel_extension_for_pytorch._C \
as core; print(core._get_current_onednn_isa_level())" '
        with subprocess.Popen(
            command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
        ) as p:
            out = p.stdout.readlines()
            onednn_isa_level = str(out[-1], "utf-8").strip()
            self.assertTrue(onednn_isa_level == "AVX2")

    @unittest.skipIf(
        check_not_sync_onednn_isa_level(), "skip this if not sync onednn isa level"
    )
    def test_onednn_do_not_set_isa_level(self):
        """ONEDNN_MAX_CPU_ISA must not change the IPEX-side ISA selection."""
        command = 'ONEDNN_MAX_CPU_ISA=avx2 python -c "import torch; import intel_extension_for_pytorch._C \
as core; print(core._get_current_isa_level().lower())" '
        cur_ipex_isa = get_currnet_isa_level()
        with subprocess.Popen(
            command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
        ) as p:
            out = p.stdout.readlines()
            cur_ipex_isa_1 = str(out[-1], "utf-8").strip()
            self.assertTrue(cur_ipex_isa == cur_ipex_isa_1)
# Run the suite when executed as a script.
if __name__ == "__main__":
    unittest.main()
| 3,506 | 28.225 | 108 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_transfree_bmm.py | import unittest
import torch
import torch.nn as nn
import intel_extension_for_pytorch as ipex
from common_utils import TestCase
class TransFree_FP32_Bmm(nn.Module):
    """Plain batched matmul, fp32 path (no output transpose)."""

    def __init__(self):
        super(TransFree_FP32_Bmm, self).__init__()

    def forward(self, x1, y1):
        # Single batched matrix multiply of the two inputs.
        return torch.matmul(x1, y1)
class OutTransFree_FP32_Bmm_v1(nn.Module):
    """Batched matmul whose result is transposed on dims (1, 2)."""

    def __init__(self):
        super(OutTransFree_FP32_Bmm_v1, self).__init__()

    def forward(self, x1, y1):
        product = torch.matmul(x1, y1)
        # Exchange dims 1 and 2 of the matmul result.
        return product.transpose(1, 2)
class OutTransFree_FP32_Bmm_v2(nn.Module):
    """Batched matmul whose result is permuted to (0, 2, 1, 3)."""

    def __init__(self):
        super(OutTransFree_FP32_Bmm_v2, self).__init__()

    def forward(self, x1, y1):
        # Matmul then reorder dims in one chained expression.
        return torch.matmul(x1, y1).permute(0, 2, 1, 3)
class OutTransFree_FP32_Bmm_v3(nn.Module):
    """Batched matmul whose result is transposed on dims (1, 3)."""

    def __init__(self):
        super(OutTransFree_FP32_Bmm_v3, self).__init__()

    def forward(self, x1, y1):
        product = torch.matmul(x1, y1)
        # Exchange dims 1 and 3 of the matmul result.
        return product.transpose(1, 3)
class OutTransFree_FP32_Bmm_v4(nn.Module):
    """Batched matmul whose result has its last two dims swapped via permute."""

    def __init__(self):
        super(OutTransFree_FP32_Bmm_v4, self).__init__()

    def forward(self, x1, y1):
        # Matmul then swap the trailing pair of dims.
        return torch.matmul(x1, y1).permute(0, 1, 3, 2)
class TransFree_BF16_Bmm(nn.Module):
    """Plain batched matmul; bf16 counterpart of TransFree_FP32_Bmm (dtype comes from inputs/autocast)."""

    def __init__(self):
        super(TransFree_BF16_Bmm, self).__init__()

    def forward(self, x1, y1):
        return torch.matmul(x1, y1)
class OutTransFree_BF16_Bmm_v1(nn.Module):
    """Batched matmul transposed on dims (1, 2); bf16 counterpart of the fp32 v1."""

    def __init__(self):
        super(OutTransFree_BF16_Bmm_v1, self).__init__()

    def forward(self, x1, y1):
        product = torch.matmul(x1, y1)
        return product.transpose(1, 2)
class OutTransFree_BF16_Bmm_v2(nn.Module):
    """Batched matmul permuted to (0, 2, 1, 3); bf16 counterpart of the fp32 v2."""

    def __init__(self):
        super(OutTransFree_BF16_Bmm_v2, self).__init__()

    def forward(self, x1, y1):
        return torch.matmul(x1, y1).permute(0, 2, 1, 3)
class OutTransFree_BF16_Bmm_v3(nn.Module):
    """Batched matmul transposed on dims (1, 3); bf16 counterpart of the fp32 v3."""

    def __init__(self):
        super(OutTransFree_BF16_Bmm_v3, self).__init__()

    def forward(self, x1, y1):
        product = torch.matmul(x1, y1)
        return product.transpose(1, 3)
class OutTransFree_BF16_Bmm_v4(nn.Module):
    """Batched matmul with trailing dims swapped; bf16 counterpart of the fp32 v4."""

    def __init__(self):
        super(OutTransFree_BF16_Bmm_v4, self).__init__()

    def forward(self, x1, y1):
        return torch.matmul(x1, y1).permute(0, 1, 3, 2)
class TransFreeFP32BmmTester(TestCase):
    """Checks that fp32 bmm JIT fusion avoids transpose-induced aten::contiguous copies."""

    def _test_transfree_fp32_bmm(self, bmm_model, bmm_ipex, x1, y1, isTransFree=True):
        """Trace every (x, y) pair; expect ipex::matmul fusion, and (per isTransFree)
        the presence/absence of aten::contiguous in the profiler trace."""
        for i in range(len(x1)):
            for j in range(len(y1)):
                with torch.no_grad():
                    bmm_ipex = torch.jit.trace(
                        bmm_ipex,
                        (
                            x1[i],
                            y1[j],
                        ),
                    )
                    # Two warm-up iterations so the JIT profiling executor optimizes.
                    for _ in range(2):
                        bmm_jit = bmm_ipex(x1[i], y1[j])
                    bmm_ref = bmm_model(x1[i], y1[j])
                    self.assertEqual(bmm_ref, bmm_jit, prec=1e-5)
                    bmm_graph = bmm_ipex.graph_for(x1[i], y1[j])
                    self.assertTrue(
                        any(n.kind() == "ipex::matmul" for n in bmm_graph.nodes())
                    )
                with torch.profiler.profile(
                    activities=[torch.profiler.ProfilerActivity.CPU]
                ) as p:
                    bmm_ipex(x1[i], y1[j])
                if isTransFree is True:
                    self.assertFalse("aten::contiguous" in str(p.key_averages()))
                else:
                    self.assertTrue("aten::contiguous" in str(p.key_averages()))

    def _test_outtransfree_fp32_bmm(
        self, bmm_model, bmm_ipex, x1, y1, isOutTransFree=True
    ):
        """Like _test_transfree_fp32_bmm, but checks for the fused output-transpose
        node (ipex::matmul_outtrans) instead of profiler output."""
        for i in range(len(x1)):
            for j in range(len(y1)):
                with torch.no_grad():
                    bmm_ipex = torch.jit.trace(
                        bmm_ipex,
                        (
                            x1[i],
                            y1[j],
                        ),
                    )
                    for _ in range(2):
                        bmm_jit = bmm_ipex(x1[i], y1[j])
                    bmm_ref = bmm_model(x1[i], y1[j])
                    self.assertEqual(bmm_ref, bmm_jit, prec=1e-5)
                    bmm_graph = bmm_ipex.graph_for(x1[i], y1[j])
                    if isOutTransFree is True:
                        self.assertTrue(
                            any(
                                n.kind() == "ipex::matmul_outtrans"
                                for n in bmm_graph.nodes()
                            )
                        )
                    else:
                        self.assertTrue(
                            any(n.kind() == "ipex::matmul" for n in bmm_graph.nodes())
                        )

    def _test_unusual_fp32_bmm(self, bmm_model, bmm_ipex, x1, y1, isTransFree=True):
        """Pairs x1[i] with y1[i] (not a cross product); checks whether the matmul
        stays on the ipex path or falls back to aten::matmul."""
        for i in range(len(x1)):
            with torch.no_grad():
                bmm_ipex = torch.jit.trace(
                    bmm_ipex,
                    (
                        x1[i],
                        y1[i],
                    ),
                )
                for _ in range(2):
                    bmm_jit = bmm_ipex(x1[i], y1[i])
                bmm_ref = bmm_model(x1[i], y1[i])
                self.assertEqual(bmm_ref, bmm_jit, prec=1e-5)
                bmm_graph = bmm_ipex.graph_for(x1[i], y1[i])
                if isTransFree is True:
                    self.assertTrue(
                        any(n.kind() == "ipex::matmul" for n in bmm_graph.nodes())
                    )
                else:
                    self.assertTrue(
                        any(n.kind() == "aten::matmul" for n in bmm_graph.nodes())
                    )

    def test_transfree_fp32_bmm(self):
        """Exercises transpose-free and transpose-requiring input layouts, plus the
        four output-transpose module variants."""
        # x1/y1: transposed layouts oneDNN can consume directly (no copy needed).
        x1 = [
            torch.randn(32, 13, 27, 25),
            torch.randn(32, 27, 13, 25).transpose(1, 2),
            torch.randn(32, 13, 25, 27).transpose(2, 3),
            torch.randn(32, 25, 13, 27).transpose(2, 3).transpose(1, 3),
        ]
        y1 = [
            torch.randn(32, 13, 25, 27),
            torch.randn(32, 25, 13, 27).transpose(1, 2),
            torch.randn(32, 13, 27, 25).transpose(2, 3),
            torch.randn(32, 27, 13, 25).transpose(2, 3).transpose(1, 3),
        ]
        # x2/y2: layouts that still require an explicit contiguous copy.
        x2 = [
            torch.randn(13, 32, 27, 25).transpose(0, 1),
            torch.randn(32, 25, 27, 13).transpose(1, 3),
        ]
        y2 = [
            torch.randn(32, 27, 13, 25).transpose(1, 3).transpose(1, 2),
            torch.randn(27, 13, 25, 32).transpose(0, 3),
        ]
        # 3-D batched variants, copy-free ...
        x3 = [torch.randn(32, 27, 25), torch.randn(32, 25, 27).transpose(1, 2)]
        y3 = [torch.randn(32, 25, 27), torch.randn(32, 27, 25).transpose(1, 2)]
        # ... and 3-D variants needing a copy.
        x4 = [
            torch.randn(27, 32, 25).transpose(0, 1),
            torch.randn(27, 25, 32).transpose(1, 2).transpose(0, 1),
        ]
        y4 = [
            torch.randn(25, 32, 27).transpose(0, 1),
            torch.randn(27, 25, 32).transpose(0, 2),
        ]
        # 5-D batched variants, copy-free ...
        x5 = [
            torch.randn(32, 19, 13, 27, 25),
            torch.randn(32, 19, 27, 13, 25).transpose(2, 3),
            torch.randn(32, 19, 13, 25, 27).transpose(-1, -2),
            torch.randn(32, 13, 27, 19, 25).transpose(1, 2).transpose(1, 3),
        ]
        y5 = [
            torch.randn(32, 19, 13, 25, 29),
            torch.randn(32, 13, 19, 25, 29).transpose(1, 2),
            torch.randn(32, 25, 13, 19, 29).transpose(1, 3),
            torch.randn(32, 19, 29, 13, 25).transpose(3, 4).transpose(2, 4),
        ]
        # ... and 5-D variants needing a copy.
        x6 = [
            torch.randn(32, 25, 13, 27, 19).transpose(1, 4),
            torch.randn(19, 32, 13, 27, 25).transpose(0, 1),
        ]
        y6 = [torch.randn(29, 19, 13, 25, 32).transpose(0, -1)]
        # Degenerate shapes (size-1 dims, sliced views, channels-last) that should
        # still take the ipex path ...
        ref = torch.rand(1, 1, 1, 768)
        x7 = [
            torch.rand(1, 1, 1, 768).to(memory_format=torch.channels_last),
            torch.randn(2, 16, 32, 768)[:, :, :, 0:1],
            torch.randn(2, 16, 32, 768)[:, :, :, 5],
            ref[:, :, :, 10],
        ]
        y7 = [
            torch.rand(1, 1, 768, 3).to(memory_format=torch.channels_last),
            torch.ones(2, 16, 1, 32),
            torch.ones(2, 32, 16),
            ref[:, :, :, 39],
        ]
        # ... and broadcasting shapes that fall back to aten::matmul.
        x8 = [torch.randn(12, 32, 15, 30), torch.randn(2, 6, 19, 3, 8)]
        y8 = [torch.randn(1, 32, 30, 29), torch.randn(1, 8, 16)]
        bmm_model = TransFree_FP32_Bmm().eval()
        bmm_ipex = ipex.optimize(bmm_model, dtype=torch.float, level="O1")
        self._test_transfree_fp32_bmm(bmm_model, bmm_ipex, x1, y1, isTransFree=True)
        self._test_transfree_fp32_bmm(bmm_model, bmm_ipex, x2, y2, isTransFree=False)
        self._test_transfree_fp32_bmm(bmm_model, bmm_ipex, x3, y3, isTransFree=True)
        self._test_transfree_fp32_bmm(bmm_model, bmm_ipex, x4, y4, isTransFree=False)
        self._test_transfree_fp32_bmm(bmm_model, bmm_ipex, x5, y5, isTransFree=True)
        self._test_transfree_fp32_bmm(bmm_model, bmm_ipex, x6, y6, isTransFree=False)
        self._test_unusual_fp32_bmm(bmm_model, bmm_ipex, x7, y7, isTransFree=True)
        self._test_unusual_fp32_bmm(bmm_model, bmm_ipex, x8, y8, isTransFree=False)
        bmm_out_model_v1 = OutTransFree_FP32_Bmm_v1().eval()
        bmm_out_ipex_v1 = ipex.optimize(bmm_out_model_v1, dtype=torch.float, level="O1")
        bmm_out_model_v2 = OutTransFree_FP32_Bmm_v2().eval()
        bmm_out_ipex_v2 = ipex.optimize(bmm_out_model_v2, dtype=torch.float, level="O1")
        bmm_out_model_v3 = OutTransFree_FP32_Bmm_v3().eval()
        bmm_out_ipex_v3 = ipex.optimize(bmm_out_model_v3, dtype=torch.float, level="O1")
        bmm_out_model_v4 = OutTransFree_FP32_Bmm_v4().eval()
        bmm_out_ipex_v4 = ipex.optimize(bmm_out_model_v4, dtype=torch.float, level="O1")
        # Ordered so even indices (v1, v2) fuse the output transpose and odd
        # indices (v3, v4) do not.
        bmm_out_model = [
            bmm_out_model_v1,
            bmm_out_model_v3,
            bmm_out_model_v2,
            bmm_out_model_v4,
        ]
        bmm_out_ipex = [
            bmm_out_ipex_v1,
            bmm_out_ipex_v3,
            bmm_out_ipex_v2,
            bmm_out_ipex_v4,
        ]
        for i in range(len(bmm_out_model)):
            if i % 2 == 0:
                self._test_outtransfree_fp32_bmm(
                    bmm_out_model[i], bmm_out_ipex[i], x1, y1, isOutTransFree=True
                )
                self._test_outtransfree_fp32_bmm(
                    bmm_out_model[i], bmm_out_ipex[i], x2, y2, isOutTransFree=True
                )
            else:
                self._test_outtransfree_fp32_bmm(
                    bmm_out_model[i], bmm_out_ipex[i], x1, y1, isOutTransFree=False
                )
                self._test_outtransfree_fp32_bmm(
                    bmm_out_model[i], bmm_out_ipex[i], x2, y2, isOutTransFree=False
                )
class TransFreeBF16BmmTester(TestCase):
    """Checks BF16 batched matmul under TorchScript tracing.

    Two properties are verified:
      * the traced graph contains the fused ``ipex::matmul`` node, and
      * for "transpose-free" input layouts no ``aten::contiguous`` call shows
        up in the profile (i.e. IPEX consumed the strided inputs directly).
    """

    def _test_transfree_bf16_bmm(self, bmm_model, bmm_ipex, x1, y1, isTransFree=True):
        # Trace/run every (x, y) layout combination; `isTransFree` states
        # whether the layouts should be consumable without re-packing.
        for i in range(len(x1)):
            for j in range(len(y1)):
                with torch.cpu.amp.autocast(), torch.no_grad():
                    # NOTE: bmm_ipex is re-traced each iteration (the traced
                    # module from the previous iteration is traced again).
                    bmm_ipex = torch.jit.trace(
                        bmm_ipex,
                        (
                            x1[i],
                            y1[j],
                        ),
                    )
                    # Two warm-up runs so the JIT profiling/fusion kicks in.
                    for _ in range(2):
                        bmm_jit = bmm_ipex(x1[i], y1[j])
                    bmm_ref = bmm_model(x1[i], y1[j])
                    # AssertionError: tensor(0.0625) not less than or equal to 0.01
                    self.assertEqual(bmm_ref, bmm_jit, prec=7e-2)
                    bmm_graph = bmm_ipex.graph_for(x1[i], y1[j])
                    # The matmul must have been fused into the IPEX op.
                    self.assertTrue(
                        any(n.kind() == "ipex::matmul" for n in bmm_graph.nodes())
                    )
                # Profile one run: a transpose-free execution must not emit
                # any aten::contiguous (re-packing) call.
                with torch.profiler.profile(
                    activities=[torch.profiler.ProfilerActivity.CPU]
                ) as p:
                    bmm_ipex(x1[i], y1[j])
                if isTransFree is True:
                    self.assertFalse("aten::contiguous" in str(p.key_averages()))
                else:
                    self.assertTrue("aten::contiguous" in str(p.key_averages()))

    def _test_outtransfree_bf16_bmm(
        self, bmm_model, bmm_ipex, x1, y1, isOutTransFree=True
    ):
        # Same driver as above, but checks whether the *output* transpose was
        # folded into the fused op (ipex::matmul_outtrans) or not.
        for i in range(len(x1)):
            for j in range(len(y1)):
                with torch.cpu.amp.autocast(), torch.no_grad():
                    bmm_ipex = torch.jit.trace(
                        bmm_ipex,
                        (
                            x1[i],
                            y1[j],
                        ),
                    )
                    for _ in range(2):
                        bmm_jit = bmm_ipex(x1[i], y1[j])
                    bmm_ref = bmm_model(x1[i], y1[j])
                    # AssertionError: tensor(0.0625) not less than or equal to 0.01
                    self.assertEqual(bmm_ref, bmm_jit, prec=7e-2)
                    bmm_graph = bmm_ipex.graph_for(x1[i], y1[j])
                    if isOutTransFree is True:
                        self.assertTrue(
                            any(
                                n.kind() == "ipex::matmul_outtrans"
                                for n in bmm_graph.nodes()
                            )
                        )
                    else:
                        self.assertTrue(
                            any(n.kind() == "ipex::matmul" for n in bmm_graph.nodes())
                        )

    def test_transfree_bf16_bmm(self):
        """Exercise 4-D and 3-D BF16 bmm layouts, trans-free and not."""
        # x1/y1: 4-D layouts that are consumable without repacking.
        x1 = [
            torch.randn(32, 13, 27, 25).to(torch.bfloat16),
            torch.randn(32, 27, 13, 25).to(torch.bfloat16).transpose(1, 2),
            torch.randn(32, 13, 25, 27).to(torch.bfloat16).transpose(2, 3),
            torch.randn(32, 25, 13, 27)
            .to(torch.bfloat16)
            .transpose(2, 3)
            .transpose(1, 3),
        ]
        y1 = [
            torch.randn(32, 13, 25, 27).to(torch.bfloat16),
            torch.randn(32, 25, 13, 27).to(torch.bfloat16).transpose(1, 2),
            torch.randn(32, 13, 27, 25).to(torch.bfloat16).transpose(2, 3),
            torch.randn(32, 27, 13, 25)
            .to(torch.bfloat16)
            .transpose(2, 3)
            .transpose(1, 3),
        ]
        # x2/y2: layouts with batch-dim transposes that force repacking.
        x2 = [
            torch.randn(13, 32, 27, 25).to(torch.bfloat16).transpose(0, 1),
            torch.randn(32, 25, 27, 13).to(torch.bfloat16).transpose(1, 3),
        ]
        y2 = [
            torch.randn(32, 27, 13, 25)
            .to(torch.bfloat16)
            .transpose(1, 3)
            .transpose(1, 2),
            torch.randn(27, 13, 25, 32).to(torch.bfloat16).transpose(0, 3),
        ]
        # x3/y3: 3-D trans-free layouts.
        x3 = [
            torch.randn(32, 27, 25).to(torch.bfloat16),
            torch.randn(32, 25, 27).to(torch.bfloat16).transpose(1, 2),
        ]
        y3 = [
            torch.randn(32, 25, 27).to(torch.bfloat16),
            torch.randn(32, 27, 25).to(torch.bfloat16).transpose(1, 2),
        ]
        # x4/y4: 3-D layouts that are not trans-free.
        x4 = [
            torch.randn(27, 32, 25).to(torch.bfloat16).transpose(0, 1),
            torch.randn(27, 25, 32).to(torch.bfloat16).transpose(1, 2).transpose(0, 1),
        ]
        y4 = [
            torch.randn(25, 32, 27).to(torch.bfloat16).transpose(0, 1),
            torch.randn(27, 25, 32).to(torch.bfloat16).transpose(0, 2),
        ]
        bmm_model = TransFree_BF16_Bmm().eval()
        bmm_ipex = ipex.optimize(bmm_model, dtype=torch.bfloat16, level="O1")
        self._test_transfree_bf16_bmm(bmm_model, bmm_ipex, x1, y1, isTransFree=True)
        self._test_transfree_bf16_bmm(bmm_model, bmm_ipex, x2, y2, isTransFree=False)
        self._test_transfree_bf16_bmm(bmm_model, bmm_ipex, x3, y3, isTransFree=True)
        self._test_transfree_bf16_bmm(bmm_model, bmm_ipex, x4, y4, isTransFree=False)
        bmm_out_model_v1 = OutTransFree_BF16_Bmm_v1().eval()
        bmm_out_ipex_v1 = ipex.optimize(
            bmm_out_model_v1, dtype=torch.bfloat16, level="O1"
        )
        bmm_out_model_v2 = OutTransFree_BF16_Bmm_v2().eval()
        bmm_out_ipex_v2 = ipex.optimize(
            bmm_out_model_v2, dtype=torch.bfloat16, level="O1"
        )
        bmm_out_model_v3 = OutTransFree_BF16_Bmm_v3().eval()
        bmm_out_ipex_v3 = ipex.optimize(
            bmm_out_model_v3, dtype=torch.bfloat16, level="O1"
        )
        bmm_out_model_v4 = OutTransFree_BF16_Bmm_v4().eval()
        bmm_out_ipex_v4 = ipex.optimize(
            bmm_out_model_v4, dtype=torch.bfloat16, level="O1"
        )
        # Interleaved order so even indices (v1, v2) are the out-trans-free
        # variants and odd indices (v3, v4) are not.
        bmm_out_model = [
            bmm_out_model_v1,
            bmm_out_model_v3,
            bmm_out_model_v2,
            bmm_out_model_v4,
        ]
        bmm_out_ipex = [
            bmm_out_ipex_v1,
            bmm_out_ipex_v3,
            bmm_out_ipex_v2,
            bmm_out_ipex_v4,
        ]
        for i in range(len(bmm_out_model)):
            if i % 2 == 0:
                self._test_outtransfree_bf16_bmm(
                    bmm_out_model[i], bmm_out_ipex[i], x1, y1, isOutTransFree=True
                )
                self._test_outtransfree_bf16_bmm(
                    bmm_out_model[i], bmm_out_ipex[i], x2, y2, isOutTransFree=True
                )
            else:
                self._test_outtransfree_bf16_bmm(
                    bmm_out_model[i], bmm_out_ipex[i], x1, y1, isOutTransFree=False
                )
                self._test_outtransfree_bf16_bmm(
                    bmm_out_model[i], bmm_out_ipex[i], x2, y2, isOutTransFree=False
                )
if __name__ == "__main__":
    # Run the whole suite when executed directly; `test` keeps the
    # unittest.TestProgram instance returned by main().
    test = unittest.main()
| 17,934 | 35.159274 | 88 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_torch_compile.py | import unittest
import itertools
import copy
import torch
import torch.nn.functional as F
from torch.optim import SGD
import intel_extension_for_pytorch as ipex
from common_utils import TestCase
conv_module = {1: torch.nn.Conv1d, 2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
# Map spatial dimensionality (1/2/3) to the matching transposed-convolution
# module class; mirrors `conv_module` above.
convtranspose_module = {
    1: torch.nn.ConvTranspose1d,
    2: torch.nn.ConvTranspose2d,
    3: torch.nn.ConvTranspose3d,
}
def __init__(
self,
dim,
in_channels,
out_channels,
kernel_size,
):
super(ConvNd_Relu, self).__init__()
self.conv = conv_module[dim](
in_channels,
out_channels,
kernel_size=kernel_size,
)
def forward(self, x):
return F.relu(self.conv(x))
class Linear_Relu(torch.nn.Module):
def __init__(self, in_f, out_f):
super(Linear_Relu, self).__init__()
self.linear = torch.nn.Linear(in_f, out_f)
def forward(self, x):
return F.relu(self.linear(x))
class DeconvNd_Relu(torch.nn.Module):
def __init__(
self,
dim,
ic,
oc,
kernel_size,
):
super(DeconvNd_Relu, self).__init__()
self.deconv = convtranspose_module[dim](
ic,
oc,
kernel_size=kernel_size,
)
def forward(self, x):
return F.relu(self.deconv(x))
class Lstm(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_layers):
super(Lstm, self).__init__()
self.lstm = torch.nn.LSTM(
input_size=input_size, hidden_size=hidden_size, num_layers=num_layers
)
def forward(self, x, h=None):
x, h = self.lstm(x, h)
return x, h
class TestCompileCases(TestCase):
def test_conv_relu_inference(self):
for dim in [1, 2, 3]:
input_shapes = {1: (4,), 2: (4, 4), 3: (4, 4, 4)}
options = itertools.product(
[torch.float32, torch.bfloat16],
["torchscript", "inductor"],
[True, False],
[True, False],
[True, False],
)
for (
dtype,
compiler_backend,
dynamic,
ipex_optimize,
weight_prepack,
) in options:
if weight_prepack is True and ipex_optimize is False:
continue
N = 2
M = 2
C = 3
x_shape = (N, C) + input_shapes[dim]
x = torch.randn(x_shape, dtype=torch.float32)
model = ConvNd_Relu(
dim=dim,
in_channels=C,
out_channels=M,
kernel_size=3,
).eval()
if ipex_optimize:
# TODO: support channels_last_1d.
if dim == 1:
ipex.disable_auto_channels_last()
else:
ipex.enable_auto_channels_last()
model = ipex.optimize(
model, weights_prepack=weight_prepack, dtype=dtype
)
torch._dynamo.reset()
ipex._set_compiler_backend(compiler_backend)
compile_model = torch.compile(model, dynamic=dynamic, backend="ipex")
with torch.cpu.amp.autocast(
enabled=(dtype == torch.bfloat16), dtype=torch.bfloat16
), torch.no_grad():
ori_y = model(x)
for _ in range(3):
y = compile_model(x)
self.assertEqual(y, ori_y)
self.assertTrue(y.dtype == dtype)
def test_conv_relu_train(self):
for dim in [1, 2, 3]:
input_shapes = {1: (4,), 2: (4, 4), 3: (4, 4, 4)}
options = itertools.product(
[torch.float32, torch.bfloat16],
["inductor"],
[True, False],
[True, False],
[True, False],
)
for (
dtype,
compiler_backend,
dynamic,
ipex_optimize,
weight_prepack,
) in options:
if weight_prepack is True and ipex_optimize is False:
continue
N = 2
M = 2
C = 3
x_shape = (N, C) + input_shapes[dim]
input = torch.randn(x_shape, dtype=torch.float32)
ori_x = input.clone().requires_grad_()
x = input.clone().requires_grad_()
conv = ConvNd_Relu(
dim=dim,
in_channels=C,
out_channels=M,
kernel_size=3,
)
ori_model = copy.deepcopy(conv).train()
model = copy.deepcopy(conv).train()
optimizer = SGD(model.parameters(), lr=0.01, momentum=0.9)
if ipex_optimize:
# TODO: support channels_last_1d.
if dim == 1:
ipex.disable_auto_channels_last()
else:
ipex.enable_auto_channels_last()
ori_model, _ = ipex.optimize(
ori_model,
weights_prepack=weight_prepack,
dtype=dtype,
optimizer=optimizer,
)
model, _ = ipex.optimize(
model,
weights_prepack=weight_prepack,
dtype=dtype,
optimizer=optimizer,
)
torch._dynamo.reset()
ipex._set_compiler_backend(compiler_backend)
compile_model = torch.compile(model, dynamic=dynamic, backend="ipex")
with torch.cpu.amp.autocast(
enabled=(dtype == torch.bfloat16), dtype=torch.bfloat16
):
ori_y = ori_model(ori_x)
y = compile_model(x)
grad_x = torch.randn(y.shape, dtype=torch.float32)
ori_y.backward(grad_x)
y.backward(grad_x)
self.assertEqual(y, ori_y)
self.assertTrue(y.dtype == dtype)
self.assertEqual(x.grad, ori_x.grad)
def test_deconv_relu_inference(self):
for dim in [1, 2, 3]:
input_shapes = {1: (4,), 2: (4, 4), 3: (4, 4, 4)}
input_channel_per_group = 6
output_channel_per_group = 3
kernel_size = 3
options = itertools.product(
[torch.float32, torch.bfloat16],
["torchscript", "inductor"],
[True, False],
[True, False],
[True, False],
)
for (
dtype,
compiler_backend,
dynamic,
ipex_optimize,
weight_prepack,
) in options:
if weight_prepack is True and ipex_optimize is False:
continue
ic = input_channel_per_group
oc = output_channel_per_group
x_shape = (2, ic) + input_shapes[dim]
x = torch.randn(x_shape, dtype=torch.float32)
model = DeconvNd_Relu(dim, ic, oc, kernel_size).eval()
if ipex_optimize:
# TODO: support channels_last_1d.
if dim == 1:
ipex.disable_auto_channels_last()
else:
ipex.enable_auto_channels_last()
model = ipex.optimize(
model, weights_prepack=weight_prepack, dtype=dtype
)
torch._dynamo.reset()
ipex._set_compiler_backend(compiler_backend)
compile_model = torch.compile(model, dynamic=dynamic, backend="ipex")
with torch.cpu.amp.autocast(
enabled=(dtype == torch.bfloat16), dtype=torch.bfloat16
), torch.no_grad():
ori_y = model(x)
for _ in range(3):
y = compile_model(x)
self.assertEqual(y, ori_y)
self.assertTrue(y.dtype == dtype)
def test_deconv_relu_train(self):
for dim in [1, 2, 3]:
input_shapes = {1: (4,), 2: (4, 4), 3: (4, 4, 4)}
input_channel_per_group = 6
output_channel_per_group = 3
kernel_size = 3
options = itertools.product(
[torch.float32, torch.bfloat16],
["inductor"],
[True, False],
[True, False],
[True, False],
)
for (
dtype,
compiler_backend,
dynamic,
ipex_optimize,
weight_prepack,
) in options:
if weight_prepack is True and ipex_optimize is False:
continue
ic = input_channel_per_group
oc = output_channel_per_group
x_shape = (2, ic) + input_shapes[dim]
input = torch.randn(x_shape, dtype=torch.float32)
ori_x = input.clone().requires_grad_()
x = input.clone().requires_grad_()
deconv = DeconvNd_Relu(dim, ic, oc, kernel_size)
ori_model = copy.deepcopy(deconv).train()
model = copy.deepcopy(deconv).train()
optimizer = SGD(model.parameters(), lr=0.01, momentum=0.9)
if ipex_optimize:
# TODO: support channels_last_1d.
if dim == 1:
ipex.disable_auto_channels_last()
else:
ipex.enable_auto_channels_last()
ori_model, _ = ipex.optimize(
ori_model,
weights_prepack=weight_prepack,
dtype=dtype,
optimizer=optimizer,
)
model, _ = ipex.optimize(
model,
weights_prepack=weight_prepack,
dtype=dtype,
optimizer=optimizer,
)
torch._dynamo.reset()
ipex._set_compiler_backend(compiler_backend)
compile_model = torch.compile(model, dynamic=dynamic, backend="ipex")
with torch.cpu.amp.autocast(
enabled=(dtype == torch.bfloat16), dtype=torch.bfloat16
):
ori_y = ori_model(ori_x)
y = compile_model(x)
grad_x = torch.randn(ori_y.shape, dtype=torch.float32)
ori_y.backward(grad_x)
y.backward(grad_x)
self.assertEqual(y, ori_y)
self.assertTrue(y.dtype == dtype)
self.assertEqual(x.grad, ori_x.grad)
def test_linear_relu_inference(self):
out_features = 4
in_features = 3
input_shapes = [(2, in_features), (2, 2, in_features), (2, 2, 2, in_features)]
options = itertools.product(
input_shapes,
[torch.float32, torch.bfloat16],
["torchscript", "inductor"],
[True, False],
[True, False],
[True, False],
)
for (
x_shape,
dtype,
compiler_backend,
dynamic,
ipex_optimize,
weight_prepack,
) in options:
if weight_prepack is True and ipex_optimize is False:
continue
x = torch.randn(x_shape, dtype=torch.float32)
model = Linear_Relu(in_features, out_features).eval()
if ipex_optimize:
model = ipex.optimize(
model, weights_prepack=weight_prepack, dtype=dtype
)
torch._dynamo.reset()
ipex._set_compiler_backend(compiler_backend)
compile_model = torch.compile(model, dynamic=dynamic, backend="ipex")
with torch.cpu.amp.autocast(
enabled=(dtype == torch.bfloat16), dtype=torch.bfloat16
), torch.no_grad():
ori_y = model(x)
for _ in range(3):
y = compile_model(x)
self.assertEqual(y, ori_y, prec=0.01)
self.assertTrue(y.dtype == dtype)
def test_linear_relu_train(self):
out_features = 4
in_features = 3
input_shapes = [(2, in_features), (2, 2, in_features), (2, 2, 2, in_features)]
options = itertools.product(
input_shapes,
[torch.float32, torch.bfloat16],
["inductor"],
[True, False],
[True, False],
[True, False],
)
for (
x_shape,
dtype,
compiler_backend,
dynamic,
ipex_optimize,
weight_prepack,
) in options:
if weight_prepack is True and ipex_optimize is False:
continue
input = torch.randn(x_shape, dtype=torch.float32)
ori_x = input.clone().requires_grad_()
x = input.clone().requires_grad_()
linear = Linear_Relu(in_features, out_features)
ori_model = copy.deepcopy(linear).train()
model = copy.deepcopy(linear).train()
optimizer = SGD(model.parameters(), lr=0.01, momentum=0.9)
if ipex_optimize:
ori_model, _ = ipex.optimize(
ori_model,
weights_prepack=weight_prepack,
dtype=dtype,
optimizer=optimizer,
)
model, _ = ipex.optimize(
model,
weights_prepack=weight_prepack,
dtype=dtype,
optimizer=optimizer,
)
torch._dynamo.reset()
ipex._set_compiler_backend(compiler_backend)
compile_model = torch.compile(model, dynamic=dynamic, backend="ipex")
with torch.cpu.amp.autocast(
enabled=(dtype == torch.bfloat16), dtype=torch.bfloat16
):
ori_y = ori_model(ori_x)
y = compile_model(x)
grad_x = torch.randn(ori_y.shape, dtype=torch.float32)
ori_y.backward(grad_x)
y.backward(grad_x)
self.assertEqual(y, ori_y, prec=0.01)
self.assertTrue(y.dtype == dtype)
self.assertEqual(x.grad, ori_x.grad, prec=0.01)
def test_lstm_inference(self):
options = itertools.product(
[torch.float32, torch.bfloat16],
["torchscript", "inductor"],
[True, False],
[True, False],
)
for dtype, compiler_backend, dynamic, ipex_optimize in options:
input = torch.randn(5, 3, 10)
h0 = torch.randn(2, 3, 20)
c0 = torch.randn(2, 3, 20)
model = Lstm(10, 20, 2).eval()
with torch.cpu.amp.autocast(
enabled=(dtype == torch.bfloat16), dtype=torch.bfloat16
), torch.no_grad():
ori_output, (ori_hn, ori_cn) = model(input, (h0, c0))
if ipex_optimize:
model = ipex.optimize(model, dtype=dtype)
torch._dynamo.reset()
ipex._set_compiler_backend(compiler_backend)
compile_model = torch.compile(model, dynamic=dynamic, backend="ipex")
with torch.cpu.amp.autocast(
enabled=(dtype == torch.bfloat16), dtype=torch.bfloat16
), torch.no_grad():
output, (hn, cn) = compile_model(input, (h0, c0))
self.assertEqual(ori_output, output)
self.assertEqual(ori_hn, hn)
self.assertEqual(ori_cn, cn)
self.assertTrue(output.dtype == dtype)
self.assertTrue(hn.dtype == dtype)
self.assertTrue(cn.dtype == dtype)
if __name__ == "__main__":
    # Fixed seed keeps the random tensors reproducible across runs.
    torch.manual_seed(2020)
    test = unittest.main()
| 16,586 | 36.274157 | 86 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_fx_optimization.py | import unittest
import torch
import intel_extension_for_pytorch as ipex
from intel_extension_for_pytorch.nn.utils._weight_prepack import (
_IPEXLinear as _IPEXLinear,
)
from torch.testing._internal.common_utils import TestCase
from typing import List
import random
import copy
import itertools
import os
# Probe for the optional `transformers` dependency.  Besides ImportError, a
# RuntimeError can also escape at import time (e.g. a binary-incompatible
# install), so both are treated as "not available".  The two previously
# duplicated except clauses are merged into one.
try:
    import transformers  # noqa: F401

    HAS_TRANSFORMERS = True
except (ImportError, RuntimeError):
    HAS_TRANSFORMERS = False
# Decorator that skips transformer-dependent tests when unavailable.
skipIfNoTRANSFORMERS = unittest.skipIf(not HAS_TRANSFORMERS, "no transformers")
# Probe for the optional `diffusers` dependency (used by the stable-diffusion
# test below).  ImportError and import-time RuntimeError both mean "not
# available"; the duplicated except clauses are merged into one.
try:
    import diffusers

    HAS_DIFFUSERS = True
except (ImportError, RuntimeError):
    HAS_DIFFUSERS = False
# Decorator that skips diffusers-dependent tests when unavailable.
skipIfNoDIFFUSERS = unittest.skipIf(not HAS_DIFFUSERS, "no diffusers")
class MultipleLinear(torch.nn.Module):
    """Three independent linear layers applied to the same input.

    Used to exercise the concat-linear FX pass, which should be able to fuse
    the three projections into a single wide linear.
    """

    def __init__(
        self, out_fs: List[int], in_fs: List[int], bias: bool, dtype: torch.dtype
    ):
        super(MultipleLinear, self).__init__()
        self.l0 = torch.nn.Linear(in_fs[0], out_fs[0], bias=bias, dtype=dtype)
        self.l1 = torch.nn.Linear(in_fs[1], out_fs[1], bias=bias, dtype=dtype)
        self.l2 = torch.nn.Linear(in_fs[2], out_fs[2], bias=bias, dtype=dtype)

    def forward(self, x):
        # One projection per layer, all reading the same tensor.
        return self.l0(x), self.l1(x), self.l2(x)
class FxTester(TestCase):
    """Tests for the IPEX FX concat-linear optimization pass."""

    def _check_concat(self, model_before_concat, model_after_concat):
        """Assert that every Linear in `model_before_concat` has been merged
        into exactly one wide Linear in `model_after_concat` whose
        out_features equals the sum of the originals'."""

        def is_linear(m):
            # BUG FIX: this helper previously tested the enclosing loop
            # variable `child` instead of its own argument `m`; it only
            # worked because every call site happened to pass `child`.
            return isinstance(m, (torch.nn.Linear, _IPEXLinear))

        # Sum up the output widths of all linears before concatenation.
        total_out_f = 0
        for _, child in model_before_concat.named_modules():
            if is_linear(child):
                total_out_f += child.out_features
        # After concatenation there must be exactly one linear, and its
        # out_features must equal the accumulated total.
        found_linear = False
        for _, child in model_after_concat.named_modules():
            if is_linear(child):
                self.assertFalse(found_linear)
                self.assertEqual(child.out_features, total_out_f)
                found_linear = True
        self.assertTrue(found_linear)

    def test_concat_linear(self):
        """concat_linear on a toy 3-linear module: outputs unchanged and
        linears actually merged, across bias/inplace/in_feature/dtype."""
        _bias = [True, False]
        _inplace = [True, False]
        _in_feature = [16, 129]
        _dtype = [torch.float, torch.bfloat16]
        options = itertools.product(_bias, _inplace, _in_feature, _dtype)
        for bias, inplace, in_feature, dtype in options:
            x = torch.randn(100, in_feature, dtype=dtype)
            out_fs = random.sample(range(128), 3)
            in_fs = [in_feature] * 3
            m = MultipleLinear(out_fs, in_fs, bias, dtype)
            y1 = m(x)
            gm = torch.fx.symbolic_trace(m)
            y2 = gm(x)
            # Tracing alone must not change the numerics.
            self.assertEqual(y1, y2)
            concat_gm = ipex.fx.concat_linear.concat_linear(
                copy.deepcopy(gm), inplace=inplace
            )
            y3 = concat_gm(x)
            self.assertEqual(y1, y3)
            # Check that the linears were successfully concatenated.
            self._check_concat(gm, concat_gm)

    @skipIfNoTRANSFORMERS
    def test_concat_linear_hf_bert(self):
        """concat_linear on HF BERT self-attention (q/k/v projections)."""
        from transformers import AutoModelForCausalLM, AutoConfig
        from transformers.utils.fx import symbolic_trace as hf_symbolic_trace

        loc = os.path.dirname(os.path.abspath(__file__))
        config = AutoConfig.from_pretrained(loc + "/bert-base-config.json")
        model = AutoModelForCausalLM.from_config(config)
        model.eval()
        inputs = torch.load(loc + "/bert-inputs.pt")
        gm = hf_symbolic_trace(model, input_names=list(inputs.keys()))
        ref_out = gm(**inputs)
        concat_gm = ipex.fx.concat_linear.concat_linear(copy.deepcopy(gm), inplace=True)
        out = concat_gm(**inputs)
        self.assertEqual(out, ref_out)
        # Check that each layer's q/k/v linears were concatenated.
        for layer_id in range(12):
            self_att = getattr(gm.bert.encoder.layer, str(layer_id)).attention.self
            concat_self_att = getattr(
                concat_gm.bert.encoder.layer, str(layer_id)
            ).attention.self
            self._check_concat(self_att, concat_self_att)

    @skipIfNoTRANSFORMERS
    def test_automatically_apply_concat_linear_with_ipex_optimize(self):
        """ipex.optimize(concat_linear=True) applies the pass automatically
        and preserves BERT outputs for fp32 and bf16."""
        from transformers import AutoModelForCausalLM, AutoConfig

        loc = os.path.dirname(os.path.abspath(__file__))
        config = AutoConfig.from_pretrained(loc + "/bert-base-config.json")
        base_model = AutoModelForCausalLM.from_config(config).eval()
        inputs = torch.load(loc + "/bert-inputs.pt")
        for dtype in [torch.float, torch.bfloat16]:
            for inplace in [True, False]:
                model = copy.deepcopy(base_model)
                auto_cast = dtype == torch.bfloat16
                with torch.cpu.amp.autocast(auto_cast, dtype=torch.bfloat16):
                    ref_out = model(**inputs)
                ipex_model = ipex.optimize(
                    model, dtype=dtype, inplace=inplace, concat_linear=True
                )
                with torch.cpu.amp.autocast(auto_cast, dtype=torch.bfloat16):
                    out = ipex_model(**inputs)
                if dtype == torch.bfloat16:
                    # bf16 needs a looser tolerance.
                    self.assertEqual(out, ref_out, rtol=5e-2, atol=5e-2)
                else:
                    self.assertEqual(out, ref_out)
                for layer_id in range(12):
                    self_att = getattr(
                        model.bert.encoder.layer, str(layer_id)
                    ).attention.self
                    concat_self_att = getattr(
                        ipex_model.bert.encoder.layer, str(layer_id)
                    ).attention.self
                    self._check_concat(self_att, concat_self_att)

    @skipIfNoDIFFUSERS
    def test_stable_diffusion(self):
        """concat_linear inside UNet attention blocks of several stable
        diffusion configs: structure concatenated, outputs unchanged."""

        def check_unet_concated(model):
            # Recursively verify that in every BasicTransformerBlock the
            # self-attention q/k/v were merged into `to_q` (3x wide) and the
            # cross-attention k/v into `to_k` (2x wide).
            for child in model.children():
                check_unet_concated(child)
            if isinstance(model, diffusers.models.attention.BasicTransformerBlock):
                attn1 = model.attn1
                attn2 = model.attn2
                original_out_f = attn2.L__self___to_q.out_features
                self.assertTrue(hasattr(attn1, "L__self___to_q"))
                self.assertFalse(hasattr(attn1, "L__self___to_k"))
                self.assertFalse(hasattr(attn1, "L__self___to_v"))
                self.assertEqual(original_out_f * 3, attn1.L__self___to_q.out_features)
                self.assertTrue(hasattr(attn2, "L__self___to_q"))
                self.assertTrue(hasattr(attn2, "L__self___to_k"))
                self.assertFalse(hasattr(attn2, "L__self___to_v"))
                self.assertEqual(original_out_f * 2, attn2.L__self___to_k.out_features)

        models_list = [
            "stabilityai/stable-diffusion-2-1",
            "runwayml/stable-diffusion-v1-5",
            "CompVis/stable-diffusion-v1-4",
        ]
        loc = os.path.dirname(os.path.abspath(__file__))
        for model_id in models_list:
            # NOTE: "stable-difusion-config" matches the on-disk directory
            # name (intentional spelling); do not "fix" it.
            config_dir = loc + "/stable-difusion-config/" + model_id
            unet_cls = diffusers.models.unet_2d_condition.UNet2DConditionModel
            base_model = unet_cls.from_config(config_dir).eval()
            if model_id == "stabilityai/stable-diffusion-2-1":
                input = (
                    torch.randn(4, 4, 64, 64),
                    torch.tensor(921),
                    torch.randn(4, 77, 1024),
                )
            else:
                input = (
                    torch.randn(2, 4, 64, 64),
                    torch.tensor(921),
                    torch.randn(2, 77, 768),
                )
            for dtype in [torch.float, torch.bfloat16]:
                for inplace in [True, False]:
                    model1 = copy.deepcopy(base_model)
                    model2 = copy.deepcopy(base_model)
                    auto_cast = dtype == torch.bfloat16
                    ipex_model1 = ipex.optimize(
                        model1, dtype=dtype, inplace=inplace, concat_linear=True
                    )
                    check_unet_concated(ipex_model1)
                    ipex_model2 = ipex.optimize(
                        model2, dtype=dtype, inplace=inplace, concat_linear=False
                    )
                    with torch.cpu.amp.autocast(auto_cast, dtype=torch.bfloat16):
                        out1 = ipex_model1(*input)
                        out2 = ipex_model2(*input)
                        self.assertEqual(out1, out2)
if __name__ == "__main__":
    # Run the FX optimization suite when executed directly.
    test = unittest.main()
| 8,552 | 39.535545 | 88 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_tensorexpr.py | import torch
import torch.nn as nn
from torch.testing._internal.jit_utils import JitTestCase
import unittest
import torch.nn.functional as F
import time
def get_rand_seed():
    """Return a time-derived integer seed (nanoseconds since the epoch).

    Uses ``time.time_ns()`` instead of ``int(time.time() * 1000000000)``:
    the float round-trip loses precision, while ``time_ns`` yields the
    exact nanosecond count.
    """
    return time.time_ns()
# Map spatial dimensionality (1/2/3) to the matching torch Conv module class.
conv_module = {1: torch.nn.Conv1d, 2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
from typing import Dict, NamedTuple
class EltwiseFusionOp(NamedTuple):
    """Describes one eltwise fusion case: the IPEX op name plus any extra
    keyword arguments the PyTorch op needs."""

    # Name of the fused IPEX elementwise op (a trailing "_" marks in-place).
    ipex_eltwise_op: str
    # Extra kwargs forwarded to the PyTorch op (e.g. clamp min/max).
    # NOTE(review): the shared {} default is never mutated, so it is safe.
    op_input_list: Dict = {}
# Unary PyTorch eltwise ops (out-of-place and in-place variants) mapped to
# the IPEX fusion-op name expected in the fused graph.
unary_PyTorch_op_to_IPEX_op_map = {
    torch.relu: EltwiseFusionOp("relu"),
    torch.relu_: EltwiseFusionOp("relu_"),
    torch.abs: EltwiseFusionOp("abs"),
    torch.abs_: EltwiseFusionOp("abs_"),
    torch.exp: EltwiseFusionOp("exp"),
    torch.exp_: EltwiseFusionOp("exp_"),
    nn.Hardswish(inplace=False): EltwiseFusionOp("hardswish"),
    nn.Hardswish(inplace=True): EltwiseFusionOp("hardswish_"),
    torch.log: EltwiseFusionOp("log"),
    torch.log_: EltwiseFusionOp("log_"),
    nn.Mish(inplace=False): EltwiseFusionOp("mish"),
    nn.Mish(inplace=True): EltwiseFusionOp("mish_"),
    torch.sigmoid: EltwiseFusionOp("sigmoid"),
    torch.sigmoid_: EltwiseFusionOp("sigmoid_"),
    torch.round: EltwiseFusionOp("round"),
    torch.round_: EltwiseFusionOp("round_"),
    torch.sqrt: EltwiseFusionOp("sqrt"),
    torch.sqrt_: EltwiseFusionOp("sqrt_"),
    torch.square: EltwiseFusionOp("square"),
    torch.square_: EltwiseFusionOp("square_"),
    torch.tanh: EltwiseFusionOp("tanh"),
    torch.tanh_: EltwiseFusionOp("tanh_"),
    nn.SiLU(inplace=False): EltwiseFusionOp("silu"),
    nn.SiLU(inplace=True): EltwiseFusionOp("silu_"),
    nn.Hardsigmoid(inplace=False): EltwiseFusionOp("hardsigmoid"),
    nn.Hardsigmoid(inplace=True): EltwiseFusionOp("hardsigmoid_"),
}
# Eltwise ops that take extra arguments (or parameterized variants), mapped
# to the IPEX fusion-op name; `op_input_list` supplies the extra kwargs.
non_unary_PyTorch_op_to_IPEX_op_map = {
    torch.clamp: EltwiseFusionOp("clamp", op_input_list={"min": -2, "max": 3}),
    torch.clamp_: EltwiseFusionOp("clamp_", op_input_list={"min": -2, "max": 3}),
    nn.GELU(approximate="none"): EltwiseFusionOp("gelu(none)"),
    nn.GELU(approximate="tanh"): EltwiseFusionOp("gelu(tanh)"),
    nn.ELU(inplace=False): EltwiseFusionOp("elu"),
    nn.ELU(inplace=True): EltwiseFusionOp("elu_"),
    torch.pow: EltwiseFusionOp("pow", op_input_list={"exponent": 2}),
    lambda t: t.pow_(2): EltwiseFusionOp("pow_"),
    nn.LeakyReLU(negative_slope=0.02, inplace=False): EltwiseFusionOp("leaky_relu"),
    nn.LeakyReLU(negative_slope=0.02, inplace=True): EltwiseFusionOp("leaky_relu_"),
}
class ConvEltwise(nn.Module):
    """Convolution followed by an arbitrary elementwise op.

    `eltwise_fn` is any callable (torch function or nn module); extra
    keyword arguments are captured at construction and forwarded each call.
    """

    def __init__(
        self,
        eltwise_fn,
        dim,
        in_channels,
        out_channels,
        kernel_size,
        image_size,
        **kwargs
    ):
        super(ConvEltwise, self).__init__()
        # conv_module maps dim -> Conv{1,2,3}d.
        self.conv = conv_module[dim](in_channels, out_channels, kernel_size)
        self.eltwise = eltwise_fn
        self.kwargs = kwargs

    def forward(self, x):
        conv_out = self.conv(x)
        return self.eltwise(conv_out, **self.kwargs)
class IPEXConvAdd(nn.Module):
    """Two parallel bias-free 2-D convolutions whose outputs are summed
    in place (conv+add fusion pattern)."""

    def __init__(self, in_channels, out_channels, **kwargs):
        super(IPEXConvAdd, self).__init__()
        self.conv1 = torch.nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
        self.conv2 = torch.nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)

    def forward(self, x):
        lhs = self.conv1(x)
        rhs = self.conv2(x)
        lhs.add_(rhs)
        return lhs
class IPEXConvAddRelu(nn.Module):
    """relu(conv1(x)) summed in place with conv2(x), then a final in-place
    ReLU (conv+add+relu fusion pattern)."""

    def __init__(self, in_channels, out_channels, **kwargs):
        super(IPEXConvAddRelu, self).__init__()
        self.conv1 = torch.nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
        self.conv2 = torch.nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)

    def forward(self, x):
        branch1 = F.relu(self.conv1(x))
        branch1.add_(self.conv2(x))
        return F.relu(branch1, inplace=True)
class IPEXConvConvRelu(nn.Module):
    """Two stacked bias-free convolutions with a final in-place ReLU."""

    def __init__(self, in_channels, out_channels, **kwargs):
        super(IPEXConvConvRelu, self).__init__()
        self.conv1 = torch.nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
        # Second conv consumes the first conv's channel count.
        self.conv2 = torch.nn.Conv2d(out_channels, out_channels, bias=False, **kwargs)

    def forward(self, x):
        hidden = self.conv2(self.conv1(x))
        return F.relu(hidden, inplace=True)
class IPEXConvSigmoidMul(nn.Module):
    """Swish-style gate: conv output multiplied in place by its own sigmoid."""

    def __init__(self, in_channels, out_channels, **kwargs):
        super(IPEXConvSigmoidMul, self).__init__()
        self.conv = torch.nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)

    def forward(self, x):
        conv_out = self.conv(x)
        gate = torch.sigmoid(conv_out)
        conv_out.mul_(gate)
        return conv_out
class LinearEltwise(nn.Module):
    """Linear layer whose halved output feeds an arbitrary elementwise op.

    The division by 2 keeps values in a range friendly to ops like exp/pow.
    """

    def __init__(self, eltwise_fn, in_channels, out_channels, bias, **kwargs):
        super(LinearEltwise, self).__init__()
        self.linear = nn.Linear(in_channels, out_channels, bias=bias)
        self.eltwise = eltwise_fn
        self.kwargs = kwargs

    def forward(self, x):
        scaled = self.linear(x) / 2
        return self.eltwise(scaled, **self.kwargs)
class IPEXLinearAdd(nn.Module):
    """In-place sum of two parallel linear projections of the same input."""

    def __init__(self, in_channels, out_channels, bias):
        super(IPEXLinearAdd, self).__init__()
        self.linear1 = nn.Linear(in_channels, out_channels, bias=bias)
        self.linear2 = nn.Linear(in_channels, out_channels, bias=bias)

    def forward(self, x):
        out = self.linear1(x)
        out.add_(self.linear2(x))
        return out
class IPEXLinearAddRelu(nn.Module):
    """Applies one linear twice: relu(linear(x)) + linear(x), then a final
    in-place ReLU."""

    def __init__(self, in_channels, out_channels, bias):
        super(IPEXLinearAddRelu, self).__init__()
        self.linear = nn.Linear(in_channels, out_channels, bias=bias)

    def forward(self, x):
        activated = F.relu(self.linear(x))
        activated.add_(self.linear(x))
        return F.relu(activated, inplace=True)
class IPEXLinearSigmoidMul(nn.Module):
    """Swish-style gate on a linear projection: out = lin * sigmoid(lin),
    computed in place on the projection tensor."""

    def __init__(self, in_channels, out_channels, bias):
        super(IPEXLinearSigmoidMul, self).__init__()
        self.linear = nn.Linear(in_channels, out_channels, bias=bias)

    def forward(self, x):
        proj = self.linear(x)
        gate = torch.sigmoid(proj)
        proj.mul_(gate)
        return proj
class IPEXMatmulDiv(nn.Module):
    """Computes matmul(x1, x2) / x3 + x3 (matmul+div fusion pattern)."""

    def __init__(self):
        super(IPEXMatmulDiv, self).__init__()
        # Keep the historical fixed seed set at construction time.
        torch.manual_seed(2018)

    def forward(self, x1, x2, x3):
        product = torch.matmul(x1, x2)
        return product / x3 + x3
class TestTE(JitTestCase):
def test_ipex_unary_conv_fusion(self, op_list=unary_PyTorch_op_to_IPEX_op_map):
old = torch._C._debug_get_fusion_group_inlining()
torch._C._debug_set_fusion_group_inlining(False)
dim = 2
out_channels = 16
in_channels = 3
kernel_size = 3
for eltwise in op_list:
rand_seed = int(get_rand_seed())
torch.manual_seed(rand_seed)
fusion_op = op_list[eltwise]
ipex_eltwise_op = fusion_op.ipex_eltwise_op
print("TEST conv2d+%s" % ipex_eltwise_op)
for use_channels_last in [0, 1]:
for batch_size, image_size in [[8, 20], [3, 256]]:
input_size = [batch_size, in_channels, image_size, image_size]
x = torch.randn(input_size)
te_model = ConvEltwise(
eltwise, dim, in_channels, out_channels, kernel_size, image_size
).eval()
if use_channels_last:
x = x.to(memory_format=torch.channels_last)
te_model = te_model.to(memory_format=torch.channels_last)
te_model_traced = torch.jit.trace(te_model, (x))
te_model_traced = torch.jit.freeze(te_model_traced)
te_model_traced(x)
# self.assertAllFused(te_model_traced.graph_for(x))
res_jit = te_model_traced(x)
res_imperative = te_model(x)
self.assertEqual(
res_jit,
res_imperative,
"{}, {}".format(res_jit, res_imperative),
)
torch._C._debug_set_fusion_group_inlining(old)
def test_ipex_non_unary_conv_fusion(
self, op_list=non_unary_PyTorch_op_to_IPEX_op_map
):
old = torch._C._debug_get_fusion_group_inlining()
torch._C._debug_set_fusion_group_inlining(False)
dim = 2
out_channels = 16
in_channels = 3
kernel_size = 3
for eltwise in op_list:
rand_seed = int(get_rand_seed())
torch.manual_seed(rand_seed)
fusion_op = op_list[eltwise]
ipex_eltwise_op = fusion_op.ipex_eltwise_op
print("TEST conv2d+%s" % ipex_eltwise_op)
for use_channels_last in [0, 1]:
for batch_size, image_size in [[8, 20], [3, 256]]:
input_size = [batch_size, in_channels, image_size, image_size]
x = torch.randn(input_size)
op_input_list = fusion_op.op_input_list
te_model = ConvEltwise(
eltwise,
dim,
in_channels,
out_channels,
kernel_size,
image_size,
**op_input_list
).eval()
if use_channels_last:
x = x.to(memory_format=torch.channels_last)
te_model = te_model.to(memory_format=torch.channels_last)
te_model_traced = torch.jit.trace(te_model, (x))
te_model_traced = torch.jit.freeze(te_model_traced)
te_model_traced(x)
# self.assertAllFused(te_model_traced.graph_for(x))
res_jit = te_model_traced(x)
res_imperative = te_model(x)
self.assertEqual(
res_jit,
res_imperative,
"{}, {}".format(res_jit, res_imperative),
)
torch._C._debug_set_fusion_group_inlining(old)
def test_ipex_conv_add(self):
old = torch._C._debug_get_fusion_group_inlining()
torch._C._debug_set_fusion_group_inlining(False)
print("TEST conv2d+add")
rand_seed = int(get_rand_seed())
torch.manual_seed(rand_seed)
for use_channels_last in [0, 1]:
te_model = IPEXConvAdd(3, 2, kernel_size=(3, 3)).eval()
x = torch.randn(1, 3, 10, 10)
if use_channels_last:
x = x.to(memory_format=torch.channels_last)
te_model = te_model.to(memory_format=torch.channels_last)
te_model_traced = torch.jit.trace(te_model, (x))
te_model_traced = torch.jit.freeze(te_model_traced)
te_model_traced(x)
# self.assertAllFused(te_model_traced.graph_for(x))
res_jit = te_model_traced(x)
res_imperative = te_model(x)
self.assertEqual(res_jit, res_imperative)
x = torch.randn(3, 3, 20, 20)
res_jit = te_model_traced(x)
res_imperative = te_model(x)
self.assertEqual(res_jit, res_imperative)
torch._C._debug_set_fusion_group_inlining(old)
def test_ipex_conv_add_relu(self):
old = torch._C._debug_get_fusion_group_inlining()
torch._C._debug_set_fusion_group_inlining(False)
print("TEST conv2d+add+relu")
rand_seed = int(get_rand_seed())
torch.manual_seed(rand_seed)
for use_channels_last in [0, 1]:
te_model = IPEXConvAddRelu(3, 2, kernel_size=(3, 3)).eval()
x = torch.randn(1, 3, 10, 10)
if use_channels_last:
x = x.to(memory_format=torch.channels_last)
te_model = te_model.to(memory_format=torch.channels_last)
te_model_traced = torch.jit.trace(te_model, (x))
te_model_traced = torch.jit.freeze(te_model_traced)
te_model_traced(x)
# self.assertAllFused(te_model_traced.graph_for(x))
res_jit = te_model_traced(x)
res_imperative = te_model(x)
self.assertEqual(res_jit, res_imperative)
x = torch.randn(3, 3, 20, 20)
res_jit = te_model_traced(x)
res_imperative = te_model(x)
self.assertEqual(res_jit, res_imperative)
torch._C._debug_set_fusion_group_inlining(old)
def test_ipex_conv_conv_relu(self):
    """Scripted+frozen conv bottleneck (conv->conv->relu) must match eager
    mode, for contiguous and channels-last inputs, plus a different-shape
    fallback run.  Unlike the other conv tests this one uses scripting."""
    old = torch._C._debug_get_fusion_group_inlining()
    torch._C._debug_set_fusion_group_inlining(False)
    print("TEST conv bottleneck")
    torch.manual_seed(int(get_rand_seed()))
    for use_channels_last in (0, 1):
        model = IPEXConvConvRelu(3, 10, kernel_size=(3, 3)).eval()
        inp = torch.randn(1, 3, 224, 224)
        if use_channels_last:
            inp = inp.to(memory_format=torch.channels_last)
            model = model.to(memory_format=torch.channels_last)
        scripted = torch.jit.freeze(torch.jit.script(model))
        scripted(inp)  # warm-up so profiling/fusion kicks in
        # self.assertAllFused(scripted.graph_for(inp))
        self.assertEqual(scripted(inp), model(inp))
        # different shape exercises the fallback path
        inp = torch.randn(3, 3, 500, 500)
        self.assertEqual(scripted(inp), model(inp))
    torch._C._debug_set_fusion_group_inlining(old)
def test_ipex_conv_sigmoid_mul(self):
    """Traced+frozen conv2d+sigmoid+mul (swish pattern) must match eager
    mode, for contiguous and channels-last inputs, plus a different-shape
    fallback run."""
    old = torch._C._debug_get_fusion_group_inlining()
    torch._C._debug_set_fusion_group_inlining(False)
    print("TEST conv2d+sigmoid+mul")
    torch.manual_seed(int(get_rand_seed()))
    for use_channels_last in (0, 1):
        model = IPEXConvSigmoidMul(3, 2, kernel_size=(3, 3)).eval()
        inp = torch.randn(1, 3, 10, 10)
        if use_channels_last:
            inp = inp.to(memory_format=torch.channels_last)
            model = model.to(memory_format=torch.channels_last)
        traced = torch.jit.freeze(torch.jit.trace(model, (inp)))
        traced(inp)  # warm-up so profiling/fusion kicks in
        # self.assertAllFused(traced.graph_for(inp))
        self.assertEqual(traced(inp), model(inp))
        # different shape exercises the fallback path
        inp = torch.randn(3, 3, 20, 20)
        self.assertEqual(traced(inp), model(inp))
    torch._C._debug_set_fusion_group_inlining(old)
def test_ipex_matmul_div(self):
    """Scripted+frozen matmul+div must match the imperative result."""
    print("TEST conv matmul+div")
    model = IPEXMatmulDiv()
    torch.manual_seed(int(get_rand_seed()))
    inputs = [torch.randn(5, 5) for _ in range(3)]
    scripted = torch.jit.freeze(torch.jit.script(model).eval())
    scripted(*inputs)  # warm-up so profiling/fusion kicks in
    # self.assertAllFused(scripted.graph_for(*inputs))
    self.assertEqual(scripted(*inputs), model(*inputs))
def test_ipex_unary_linear_fusion(self, op_list=unary_PyTorch_op_to_IPEX_op_map):
    # Verify linear + unary-eltwise fusion under bf16 autocast against the
    # imperative (unfused) result, for every eltwise op in op_list and for
    # both bias settings.
    old = torch._C._debug_get_fusion_group_inlining()
    torch._C._debug_set_fusion_group_inlining(False)
    batch_size = 3
    out_channels = 32
    in_channels = 3
    for eltwise in op_list:
        rand_seed = int(get_rand_seed())
        torch.manual_seed(rand_seed)
        fusion_op = op_list[eltwise]
        ipex_eltwise_op = fusion_op.ipex_eltwise_op
        """ # Issue of "round"
        The OP "round" in ideep has numeric issue when input is exactly 0.500,
        so we fix the seed here for "round".
        For example:
            x = torch.Tensor([0.500])
            ideep: 1.0 = torch.round(x)
            expected: 0.0 = torch.round(x)
        The seed to reproduce the failure: 1665593217573048320
        """
        if "round" in ipex_eltwise_op:
            torch.manual_seed(1665594679504775936)
        print("TEST linear+%s" % ipex_eltwise_op)
        for bias in [True, False]:
            input_size = [batch_size, in_channels]
            x = torch.randn(input_size)
            # linear fusion only supports bf16
            with torch.cpu.amp.autocast(
                enabled=True, dtype=torch.bfloat16
            ), torch.no_grad():
                te_model = LinearEltwise(
                    eltwise, in_channels, out_channels, bias
                ).eval()
                te_model_traced = torch.jit.trace(te_model, (x))
                te_model_traced = torch.jit.freeze(te_model_traced)
                # warm-up run so the profiling executor records shapes and
                # the fusion actually happens before we inspect results
                te_model_traced(x)
                # self.assertAllFused(te_model_traced.graph_for(x))
                res_jit = te_model_traced(x)
                res_imperative = te_model(x)
                # loose tolerances: bf16 rounding differs between the fused
                # and the imperative path
                self.assertEqual(
                    res_jit,
                    res_imperative,
                    rtol=0.02,
                    atol=0.01,
                    msg="{}, {}".format(res_jit, res_imperative),
                )
    torch._C._debug_set_fusion_group_inlining(old)
def test_ipex_non_unary_linear_fusion(
    self, op_list=non_unary_PyTorch_op_to_IPEX_op_map
):
    # Same as test_ipex_unary_linear_fusion, but the eltwise ops here take
    # extra arguments which are forwarded via fusion_op.op_input_list.
    old = torch._C._debug_get_fusion_group_inlining()
    torch._C._debug_set_fusion_group_inlining(False)
    batch_size = 3
    out_channels = 32
    in_channels = 3
    for eltwise in op_list:
        rand_seed = int(get_rand_seed())
        torch.manual_seed(rand_seed)
        fusion_op = op_list[eltwise]
        ipex_eltwise_op = fusion_op.ipex_eltwise_op
        print("TEST linear+%s" % ipex_eltwise_op)
        for bias in [True, False]:
            input_size = [batch_size, in_channels]
            x = torch.randn(input_size)
            op_input_list = fusion_op.op_input_list
            # linear fusion only supports bf16
            with torch.cpu.amp.autocast(
                enabled=True, dtype=torch.bfloat16
            ), torch.no_grad():
                te_model = LinearEltwise(
                    eltwise, in_channels, out_channels, bias, **op_input_list
                ).eval()
                te_model_traced = torch.jit.trace(te_model, (x))
                te_model_traced = torch.jit.freeze(te_model_traced)
                # warm-up run so the fusion kicks in before comparison
                te_model_traced(x)
                # self.assertAllFused(te_model_traced.graph_for(x))
                res_jit = te_model_traced(x)
                res_imperative = te_model(x)
                # loose tolerances for bf16 rounding differences
                self.assertEqual(
                    res_jit,
                    res_imperative,
                    rtol=0.02,
                    atol=0.01,
                    msg="{}, {}".format(res_jit, res_imperative),
                )
    torch._C._debug_set_fusion_group_inlining(old)
def test_ipex_linear_add(self):
    """Traced+frozen linear+add (bf16 autocast) must match eager mode,
    including a different-batch-size fallback run.

    Fix: the saved fusion-group-inlining flag was never restored (unlike
    every sibling test), leaking the disabled state into later tests; it
    is now restored in a ``finally`` block.
    """
    old = torch._C._debug_get_fusion_group_inlining()
    torch._C._debug_set_fusion_group_inlining(False)
    print("TEST linear+add")
    rand_seed = int(get_rand_seed())
    torch.manual_seed(rand_seed)
    try:
        for bias in [True, False]:
            with torch.cpu.amp.autocast(
                enabled=True, dtype=torch.bfloat16
            ), torch.no_grad():
                te_model = IPEXLinearAdd(3, 32, bias).eval()
                x = torch.randn(3, 3)
                te_model_traced = torch.jit.trace(te_model, (x))
                te_model_traced = torch.jit.freeze(te_model_traced)
                te_model_traced(x)  # warm-up so fusion kicks in
                # self.assertAllFused(te_model_traced.graph_for(x))
                res_jit = te_model_traced(x)
                res_imperative = te_model(x)
                # loose tolerances for bf16 rounding differences
                self.assertEqual(
                    res_jit,
                    res_imperative,
                    rtol=0.02,
                    atol=0.01,
                    msg="{}, {}".format(res_jit, res_imperative),
                )
                # different batch size exercises the fallback path
                x = torch.randn(8, 3)
                res_jit = te_model_traced(x)
                res_imperative = te_model(x)
                self.assertEqual(
                    res_jit,
                    res_imperative,
                    rtol=0.02,
                    atol=0.01,
                    msg="{}, {}".format(res_jit, res_imperative),
                )
    finally:
        torch._C._debug_set_fusion_group_inlining(old)
def test_ipex_linear_add_relu(self):
    """Traced+frozen linear+add+relu (bf16 autocast) must match eager mode,
    including a different-batch-size fallback run.

    Fix: the saved fusion-group-inlining flag was never restored; it is now
    restored in a ``finally`` block so a failure cannot leak the state.
    """
    old = torch._C._debug_get_fusion_group_inlining()
    torch._C._debug_set_fusion_group_inlining(False)
    print("TEST linear+add+relu")
    rand_seed = int(get_rand_seed())
    torch.manual_seed(rand_seed)
    try:
        for bias in [True, False]:
            with torch.cpu.amp.autocast(
                enabled=True, dtype=torch.bfloat16
            ), torch.no_grad():
                te_model = IPEXLinearAddRelu(3, 32, bias).eval()
                x = torch.randn(3, 3)
                te_model_traced = torch.jit.trace(te_model, (x))
                te_model_traced = torch.jit.freeze(te_model_traced)
                te_model_traced(x)  # warm-up so fusion kicks in
                # self.assertAllFused(te_model_traced.graph_for(x))
                res_jit = te_model_traced(x)
                res_imperative = te_model(x)
                # loose tolerances for bf16 rounding differences
                self.assertEqual(
                    res_jit,
                    res_imperative,
                    rtol=0.02,
                    atol=0.01,
                    msg="{}, {}".format(res_jit, res_imperative),
                )
                # different batch size exercises the fallback path
                x = torch.randn(8, 3)
                res_jit = te_model_traced(x)
                res_imperative = te_model(x)
                self.assertEqual(
                    res_jit,
                    res_imperative,
                    rtol=0.02,
                    atol=0.01,
                    msg="{}, {}".format(res_jit, res_imperative),
                )
    finally:
        torch._C._debug_set_fusion_group_inlining(old)
def test_ipex_linear_sigmoid_mul(self):
    """Traced+frozen linear+sigmoid+mul (bf16 autocast) must match eager
    mode, including a different-batch-size fallback run.

    Fix: the saved fusion-group-inlining flag was never restored; it is now
    restored in a ``finally`` block so a failure cannot leak the state.
    """
    old = torch._C._debug_get_fusion_group_inlining()
    torch._C._debug_set_fusion_group_inlining(False)
    print("TEST linear+sigmoid+mul")
    rand_seed = int(get_rand_seed())
    torch.manual_seed(rand_seed)
    try:
        for bias in [True, False]:
            with torch.cpu.amp.autocast(
                enabled=True, dtype=torch.bfloat16
            ), torch.no_grad():
                te_model = IPEXLinearSigmoidMul(3, 32, bias).eval()
                x = torch.randn(3, 3)
                te_model_traced = torch.jit.trace(te_model, (x))
                te_model_traced = torch.jit.freeze(te_model_traced)
                te_model_traced(x)  # warm-up so fusion kicks in
                # self.assertAllFused(te_model_traced.graph_for(x))
                res_jit = te_model_traced(x)
                res_imperative = te_model(x)
                # loose tolerances for bf16 rounding differences
                self.assertEqual(
                    res_jit,
                    res_imperative,
                    rtol=0.02,
                    atol=0.01,
                    msg="{}, {}".format(res_jit, res_imperative),
                )
                # different batch size exercises the fallback path
                x = torch.randn(8, 3)
                res_jit = te_model_traced(x)
                res_imperative = te_model(x)
                self.assertEqual(
                    res_jit,
                    res_imperative,
                    rtol=0.02,
                    atol=0.01,
                    msg="{}, {}".format(res_jit, res_imperative),
                )
    finally:
        torch._C._debug_set_fusion_group_inlining(old)
if __name__ == "__main__":
    # Run the whole module under unittest when executed directly.
    # ipex._C.enable_custom_op_2_nnc_fuser()
    test = unittest.main()
| 24,027 | 38.13355 | 88 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_tpp_ops.py | import unittest
import torch
import random
import numpy
import intel_extension_for_pytorch as ipex
try:
    import transformers
except ImportError:
    import sys
    import subprocess

    # Best-effort install of the pinned transformers version when it is
    # missing.  NOTE(review): installing a package at import time is a side
    # effect; acceptable for a self-contained test, undesirable elsewhere.
    subprocess.check_call(
        [sys.executable, "-m", "pip", "install", "transformers==4.11.0"]
    )
    import transformers
from common_utils import TestCase
import intel_extension_for_pytorch._C as torch_ipex_cpp
class Config:
    """Minimal stand-in for a HuggingFace BERT-large configuration.

    Carries only the hyper-parameters the TPP fused-BERT modules read,
    with both dropout probabilities set to 0 so the TPP and reference
    implementations are directly comparable.
    """

    def __init__(self):
        hyper_params = {
            "attention_probs_dropout_prob": 0,
            "hidden_act": "gelu",
            "hidden_dropout_prob": 0,
            "hidden_size": 1024,
            "intermediate_size": 4096,
            "layer_norm_eps": 1e-12,
            "max_position_embeddings": 512,
            "model_type": "bert",
            "num_attention_heads": 16,
            "num_hidden_layers": 24,
            "pad_token_id": 0,
            "type_vocab_size": 2,
            "vocab_size": 30522,
            "is_decoder": False,
        }
        for name, value in hyper_params.items():
            setattr(self, name, value)
# Make the run reproducible: seed Python, NumPy, PyTorch and the
# libxsmm-backed TPP kernels with the same fixed value.
seed = 12345
random.seed(seed)
numpy.random.seed(seed)
torch.manual_seed(seed)
torch_ipex_cpp.xsmm_manual_seed(seed)
class TPPOPsTester(TestCase):
    """Compare the ipex TPP fused-BERT building blocks against the
    reference HuggingFace implementations, forward and backward.

    Fix: ``test_tpp_bert_embeddings`` zipped the TPP state dict with
    itself, so the key comparison always passed trivially; it now zips the
    HF and TPP state dicts.
    """

    def setUp(self):
        self.config = Config()
        self.batch = 4
        self.max_seq_len = 384
        # Additive attention mask: large negative values disable the padded
        # positions of each sequence (valid lengths 128/128/78/34).
        self.attention_mask = torch.zeros(self.batch, self.max_seq_len)
        self.attention_mask[0][128:] += -10000.0
        self.attention_mask[1][128:] += -10000.0
        self.attention_mask[2][78:] += -10000.0
        self.attention_mask[3][34:] += -10000.0
        self.attention_mask = self.attention_mask.unsqueeze(dim=1).unsqueeze(dim=1)
        return super().setUp()

    def _unblock_grad(self, b_param):
        # Convert a blocked-layout parameter gradient back to the plain
        # layout/dtype so it can be compared against the HF gradient.
        return b_param.blocking_manager.unblock(b_param.grad.data).to(
            b_param.unblocked_dtype
        )

    def _test_backward(self, hf_res, tpp_res, hf_model, tpp_model, prec=0.0001):
        # UT for backward: backprop a sum through both models and compare
        # the parameter gradients pairwise.
        hf_loss = hf_res.sum()
        tpp_loss = tpp_res.sum()
        hf_loss.backward()
        tpp_loss.backward()
        for param_hf, param_tpp in zip(hf_model.parameters(), tpp_model.parameters()):
            if param_tpp.is_blocked():
                self.assertEqual(
                    param_hf.grad, self._unblock_grad(param_tpp), prec=prec
                )
            else:
                # NOTE(review): unblocked params use a fixed tolerance and
                # ignore the ``prec`` argument; preserved as-is.
                self.assertEqual(param_hf.grad, param_tpp.grad, prec=0.005)

    def test_tpp_bert_embeddings(self):
        hf_embs = transformers.models.bert.modeling_bert.BertEmbeddings(self.config)
        tpp_embs = ipex.cpu.tpp.fused_bert.BertEmbeddings(self.config)
        tpp_embs.load_state_dict(hf_embs.state_dict())
        # Fix: compare HF vs TPP state-dict keys (the original zipped the
        # TPP dict with itself, making the check a no-op).
        for i, j in zip(hf_embs.state_dict(), tpp_embs.state_dict()):
            assert i == j
        # Token ids with per-sequence padding matching the attention mask.
        input_ids = torch.randint(100, 3000, (4, 384)).to(torch.long)
        input_ids[0][128:] = 0
        input_ids[1][128:] = 0
        input_ids[2][78:] = 0
        input_ids[3][34:] = 0
        token_type_ids = torch.ones(4, 384).to(torch.long)
        token_type_ids[0][128:] = 0
        token_type_ids[1][128:] = 0
        token_type_ids[2][78:] = 0
        token_type_ids[3][34:] = 0
        hf_res = hf_embs(input_ids, token_type_ids)
        tpp_res = tpp_embs(input_ids, token_type_ids).unblocked_tensor()
        self.assertEqual(hf_res, tpp_res)

    def test_tpp_bert_self_attention(self):
        ipex.cpu.tpp.fused_bert.unpad = False
        hf_self_att = transformers.models.bert.modeling_bert.BertSelfAttention(
            self.config
        )
        tpp_self_att = ipex.cpu.tpp.fused_bert.BertSelfAttention(self.config)
        tpp_self_att.load_state_dict(hf_self_att.state_dict())
        hidden_states = torch.randn(
            self.batch, self.max_seq_len, self.config.hidden_size
        )
        hf_res = hf_self_att(hidden_states, self.attention_mask)[0]
        # The TPP path consumes a pre-processed mask plus sequence offsets.
        (
            self.msk,
            self.tpp_att_mask,
            self.seq_offsets,
            self.seq_spr_offsets,
        ) = ipex.cpu.tpp.fused_bert.generate_mask(self.attention_mask)
        # TPP operates on flattened (batch*seq, hidden) activations.
        tpp_res = (
            tpp_self_att(
                hidden_states.view(
                    self.batch * self.max_seq_len, self.config.hidden_size
                ),
                self.tpp_att_mask,
                seq_offsets=self.seq_offsets,
                seq_sqr_offsets=self.seq_spr_offsets,
            )[0]
            .unblocked_tensor()
            .view(self.batch, self.max_seq_len, -1)
        )
        self.assertEqual(hf_res, tpp_res, prec=0.0002)
        self._test_backward(hf_res, tpp_res, hf_self_att, tpp_self_att, prec=0.005)

    def test_tpp_bert_output(self):
        hf_self_out = transformers.models.bert.modeling_bert.BertSelfOutput(self.config)
        tpp_self_out = ipex.cpu.tpp.fused_bert.BertSelfOutput(self.config)
        tpp_self_out.load_state_dict(hf_self_out.state_dict())
        hidden_states = torch.randn(
            self.batch, self.max_seq_len, self.config.hidden_size
        )
        input_tensor = torch.randn(
            self.batch, self.max_seq_len, self.config.hidden_size
        )
        hf_res = hf_self_out(hidden_states, input_tensor)
        tpp_res = (
            tpp_self_out(
                hidden_states.view(
                    self.batch * self.max_seq_len, self.config.hidden_size
                ),
                input_tensor.view(
                    self.batch * self.max_seq_len, self.config.hidden_size
                ),
            )
            .unblocked_tensor()
            .view(self.batch, self.max_seq_len, -1)
        )
        self.assertEqual(hf_res, tpp_res, prec=0.001)
        self._test_backward(hf_res, tpp_res, hf_self_out, tpp_self_out)

    def test_tpp_bert_intermediate(self):
        hf_intermediate = transformers.models.bert.modeling_bert.BertIntermediate(
            self.config
        )
        tpp_intermediate = ipex.cpu.tpp.fused_bert.BertIntermediate(self.config)
        tpp_intermediate.load_state_dict(hf_intermediate.state_dict())
        hidden_states = torch.randn(
            self.batch, self.max_seq_len, self.config.hidden_size
        )
        hf_res = hf_intermediate(hidden_states)
        tpp_res = (
            tpp_intermediate(
                hidden_states.view(
                    self.batch * self.max_seq_len, self.config.hidden_size
                )
            )
            .unblocked_tensor()
            .view(self.batch, self.max_seq_len, -1)
        )
        self.assertEqual(hf_res, tpp_res, prec=0.001)
        self._test_backward(
            hf_res, tpp_res, hf_intermediate, tpp_intermediate, prec=0.01
        )
if __name__ == "__main__":
    # Run the whole module under unittest when executed directly.
    test = unittest.main()
| 6,620 | 34.983696 | 88 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_lru_cache.py | import unittest
import torch
import torch.nn as nn
from common_utils import TestCase
import intel_extension_for_pytorch as ipex
class Conv2d(nn.Module):
    """Single 1x1 convolution (64 -> 64 channels, with bias) used as an
    ipex.optimize prepack target in the LRU-cache test."""

    def __init__(self):
        super(Conv2d, self).__init__()
        self.conv = nn.Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1), bias=True)

    def forward(self, x):
        out = self.conv(x)
        return out
class Linear(nn.Module):
    """Single 64x64 linear layer (with bias) used as an ipex.optimize
    prepack target in the LRU-cache test."""

    def __init__(self):
        super(Linear, self).__init__()
        self.linear = nn.Linear(64, 64, bias=True)

    def forward(self, x):
        out = self.linear(x)
        return out
class MatmulDiv(nn.Module):
    """Computes (x @ x^T) / 2 — the pattern targeted by the matmul+div
    JIT fuser."""

    def forward(self, x):
        transposed = torch.transpose(x, -1, -2).contiguous()
        product = torch.matmul(x, transposed)
        return product.div(2.0)
class Tester(TestCase):
    def test_a_lru_cache_resize(self):
        # "a_" prefix so this test runs first — presumably because
        # LRU_CACHE_CAPACITY is only consulted when the cache is first
        # created; TODO confirm.
        import os

        # Set LRU_CACHE_CAPACITY < 1024 to trigger resize
        os.environ["LRU_CACHE_CAPACITY"] = "512"
        # Conv: fp32 optimize path
        conv = Conv2d().eval()
        conv = ipex.optimize(conv, dtype=torch.float32)
        conv(torch.randn(3, 64, 56, 56))
        # Linear: bf16 optimize path
        linear = Linear().eval()
        linear = ipex.optimize(linear, dtype=torch.bfloat16)
        linear(torch.randn((100, 64), dtype=torch.bfloat16))
        # Matmul: JIT-traced fusion path
        matmul = MatmulDiv().eval()
        x = torch.randn(10, 3, 4)
        traced_model = torch.jit.trace(matmul, x).eval()
        traced_model.graph_for(x)
        # unset this environment variable so later tests see the default
        del os.environ["LRU_CACHE_CAPACITY"]
if __name__ == "__main__":
    # Run the whole module under unittest when executed directly.
    test = unittest.main()
| 1,575 | 24.836066 | 83 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_code_free_optimization.py | import unittest
from common_utils import TestCase
import os
import subprocess
import itertools
import logging
# Show debug-level log output from the launcher while the tests run.
logging.getLogger().setLevel(logging.DEBUG)
class TestCodeFreeOptimization(TestCase):
    """End-to-end checks for code-free optimization via ``ipexrun
    --auto-ipex`` and the ``auto_ipex`` module: run a small script in a
    subprocess and scan its verbose output for evidence that
    ``ipex.optimize`` was applied.

    Refactor: the three tests duplicated the subprocess-and-scan logic;
    it now lives in the ``_run_and_scan`` helper (behavior and assertion
    messages unchanged).
    """

    def _run_and_scan(self, cmd, disable_ipex_graph_mode):
        """Run *cmd* in a shell, scan combined stdout/stderr, and return
        (optimize_hit_count, saw_ipex_convolution, saw_batchnorm)."""
        hit_count = 0
        saw_conv = False
        saw_bn = False
        # The op name emitted for ipex convolution depends on whether the
        # graph-mode path is disabled.
        conv_marker = (
            "torch_ipex::convolution_forward_impl"
            if disable_ipex_graph_mode
            else "ipex_prepack::convolution_run"
        )
        with subprocess.Popen(
            cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
        ) as p:
            for line in p.stdout.readlines():
                line = str(line, "utf-8").strip()
                if line.__contains__("_ipex_optimize_hit_count"):
                    hit_count += 1
                if line.__contains__(conv_marker) or line.__contains__("fused_to"):
                    saw_conv = True
                if line.__contains__("batch_norm"):
                    saw_bn = True
        return hit_count, saw_conv, saw_bn

    def test_conv_bn(self):
        loc = os.path.dirname(os.path.abspath(__file__))
        disable_ipex_graph_modes = [False, True]
        dtypes = ["float32", "bfloat16"]
        for disable_ipex_graph_mode, dtype in itertools.product(
            disable_ipex_graph_modes, dtypes
        ):
            cmd = "ipexrun --ninstances 1 "
            cmd += "--auto-ipex "
            cmd += "--dtype {} ".format(dtype)
            cmd += "--auto-ipex-verbose "
            if disable_ipex_graph_mode:
                cmd += "--disable-ipex-graph-mode "
            cmd += "{}/code_free_optimization.py --conv_bn".format(loc)
            hit_count, saw_conv, saw_bn = self._run_and_scan(
                cmd, disable_ipex_graph_mode
            )
            assert hit_count == 1, "Expect hit once of ipex.optimize globally"
            assert saw_conv, "Expect use ipex convolution by ipex.optimize"
            # conv+bn should be folded away by ipex.optimize
            assert saw_bn is False, "should not see bn"

    def test_conv_bn_with_module_created_in_forward(self):
        loc = os.path.dirname(os.path.abspath(__file__))
        disable_ipex_graph_modes = [False, True]
        dtypes = ["float32", "bfloat16"]
        for disable_ipex_graph_mode, dtype in itertools.product(
            disable_ipex_graph_modes, dtypes
        ):
            cmd = "ipexrun --ninstances 1 "
            cmd += "--auto-ipex "
            cmd += "--dtype {} ".format(dtype)
            cmd += "--auto-ipex-verbose "
            if disable_ipex_graph_mode:
                cmd += "--disable-ipex-graph-mode "
            cmd += "{}/code_free_optimization.py --conv_bn_with_module_created_in_forward".format(
                loc
            )
            hit_count, saw_conv, _ = self._run_and_scan(cmd, disable_ipex_graph_mode)
            assert hit_count == 1, "Expect hit once of ipex.optimize globally"
            assert saw_conv, "Expect use ipex convolution by ipex.optimize"
            # Not check BN, because FX limitation, ipex.optimize failed to do fusion

    def test_auto_ipex_module(self):
        loc = os.path.dirname(os.path.abspath(__file__))
        disable_ipex_graph_modes = [False, True]
        dtypes = ["float32", "bfloat16"]
        for disable_ipex_graph_mode, dtype in itertools.product(
            disable_ipex_graph_modes, dtypes
        ):
            # Same check as test_conv_bn, but invoking the auto_ipex module
            # directly instead of going through the ipexrun launcher.
            cmd = "python -m intel_extension_for_pytorch.cpu.auto_ipex "
            cmd += "--dtype {} ".format(dtype)
            cmd += "--auto-ipex-verbose "
            if disable_ipex_graph_mode:
                cmd += "--disable-ipex-graph-mode "
            cmd += "{}/code_free_optimization.py --conv_bn".format(loc)
            hit_count, saw_conv, saw_bn = self._run_and_scan(
                cmd, disable_ipex_graph_mode
            )
            assert hit_count == 1, "Expect hit once of ipex.optimize globally"
            assert saw_conv, "Expect use ipex convolution by ipex.optimize"
            assert saw_bn is False, "should not see bn"
if __name__ == "__main__":
    # Run the whole module under unittest when executed directly.
    test = unittest.main()
| 5,731 | 43.78125 | 98 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_ao_jit_llga_utils.py | import copy
import torch
from torch.ao.quantization import MinMaxObserver, PerChannelMinMaxObserver, QConfig
from functools import wraps
from torch.testing._internal.jit_utils import (
JitTestCase,
get_execution_plan,
)
from torch.jit._recursive import wrap_cpp_module
import intel_extension_for_pytorch as ipex
# Node kind produced when oneDNN Graph (LLGA) fuses a subgraph.
LLGA_FUSION_GROUP = "ipex::LlgaFusionGroup"

# Default static quantization recipe used by the LLGA tests: per-tensor
# affine quint8 activations, per-channel symmetric qint8 weights.
default_static_qconfig = QConfig(
    activation=MinMaxObserver.with_args(
        qscheme=torch.per_tensor_affine, dtype=torch.quint8
    ),
    weight=PerChannelMinMaxObserver.with_args(
        dtype=torch.qint8, qscheme=torch.per_channel_symmetric
    ),
)
def get_eltwise_fn(name):
    """Look up an element-wise op by *name*.

    Searches ``torch`` first, then ``torch.nn.functional``; the two
    in-place module variants used by the tests are special-cased.
    Raises ``NameError`` when nothing matches.
    """
    for namespace in (torch, torch.nn.functional):
        if hasattr(namespace, name):
            return getattr(namespace, name)
    inplace_modules = {
        "hardswish_": torch.nn.Hardswish,
        "mish_": torch.nn.Mish,
    }
    if name in inplace_modules:
        return inplace_modules[name](inplace=True)
    raise NameError("Eltwise function %s not found" % name)
# For fp32 and bf16 LLGA UT only
def llga_fp32_bf16_test_env(func):
    """Decorator: run *func* with the JIT profiling mode/executor on and
    the LLGA fp32/bf16 path enabled.

    Fixes vs. the original:
      * restore the previous LLGA-enabled state (queried via
        ``ipex._C.is_llga_fp32_bf16_enabled``) instead of forcing it to
        False, and do so in a ``finally`` block so a failing test cannot
        leak the state;
      * forward ``**kwargs`` and propagate the wrapped function's result.
    """

    @wraps(func)
    def wrapTheFunction(*args, **kwargs):
        # make sure that the profiling mode is turned on
        torch._C._jit_set_profiling_mode(True)
        torch._C._jit_set_profiling_executor(True)
        prev_enabled = ipex._C.is_llga_fp32_bf16_enabled()
        ipex._C.set_llga_fp32_bf16_enabled(True)
        try:
            return func(*args, **kwargs)
        finally:
            ipex._C.set_llga_fp32_bf16_enabled(prev_enabled)

    return wrapTheFunction
def all_backward_graphs(module):
    # Collect copies of every backward graph recorded by the profiling
    # graph executor for *module*.  Requires at least one forward+backward
    # to have run so the grad executor states are populated.
    ge_state = module.get_debug_state()
    fwd_plan = get_execution_plan(ge_state)
    executors = fwd_plan.code.grad_executor_states()
    assert len(executors), "No backward graph found in the module"
    grad_executor = executors[0]
    bwd_plans = list(grad_executor.execution_plans.values())
    # Copies so callers can inspect/mutate without touching executor state.
    return [p.graph.copy() for p in bwd_plans]
def backward_graph(module):
    # Convenience wrapper: return the first recorded backward graph.
    graphs = all_backward_graphs(module)
    assert len(graphs), "Warm up the module before calling backward_graph"
    return graphs[0]
def freeze(model):
    # Freeze via the C++ API while preserving parameters, unlike
    # torch.jit.freeze which folds them into constants.
    return wrap_cpp_module(torch._C._freeze_module(model._c, preserveParameters=True))
# port from pytorch/test/test_jit_fuser_te.py
def findFusionGroups(graph):
    """Recursively collect the subgraph of every LLGA fusion-group node in
    *graph*, descending into sub-blocks of non-fusion nodes."""
    groups = []
    for node in graph.nodes():
        if node.kind() == LLGA_FUSION_GROUP:
            groups.append(node.g("Subgraph"))
        else:
            for block in node.blocks():
                groups.extend(findFusionGroups(block))
    return groups
def warmup_forward(f, *args, profiling_count=2):
    """Call *f* with *args* ``profiling_count`` times so the profiling
    executor gathers shape/type data; return the last call's result."""
    for _ in range(profiling_count):
        last = f(*args)
    return last
class JitLlgaTestCase(JitTestCase):
    """Base class for LLGA JIT tests: script/trace helpers, INT8
    quantization preparation, and graph-pattern assertions.

    Fix: ``checkAttr`` now checks the ``attr`` argument instead of the
    hard-coded ``"qtype"`` string it previously used.
    """

    def checkScript(self, m, x, freeze=True):
        """Script (and optionally freeze) *m*, check the scripted output
        against eager mode, and return (graph, scripted_module)."""
        if isinstance(m, torch.nn.Module):
            m.eval()
        with torch.no_grad():
            ref = m(*x)
            scripted = torch.jit.script(m)
            if isinstance(scripted, torch.nn.Module) and freeze:
                scripted = torch.jit.freeze(scripted)
            warmup_forward(scripted, *x)
            graph = scripted.graph_for(*x)
            y = scripted(*x)
            self.assertEqual(y, ref)
            return graph, scripted

    def checkTrace(self, m, x, freeze=True, *args, **kwargs):
        """Trace (and optionally freeze) *m*, check the traced output
        against eager mode, and return (forward_graph, traced_module)."""
        if isinstance(m, torch.nn.Module):
            m.eval()
        with torch.no_grad(), torch._jit_internal._disable_emit_hooks():
            traced = torch.jit.trace(m, x)
            if isinstance(traced, torch.nn.Module) and freeze:
                traced = torch.jit.freeze(traced)
            warmup_forward(traced, *x)
            fwd_graph = traced.graph_for(*x)
            ref_o = m(*x)
            jit_o = traced(*x)
            self.assertEqual(jit_o, ref_o)
            return fwd_graph, traced

    def assertFused(self, graph, fused_patterns):
        """Assert that none of *fused_patterns* remain in *graph*
        (i.e. they were all absorbed into fusion groups)."""
        for pat in fused_patterns:
            self.assertGraphContainsExactly(graph, pat, 0)

    def model_forward_helper(
        self,
        model,
        x=None,
        x_kwarg=None,
    ):
        """Run *model* with exactly one of positional args *x* or keyword
        args *x_kwarg*."""
        if x is None and x_kwarg is None:
            raise AssertionError(
                "x and x_kwarg cannot be none at same time for model_forward_helper."
            )
        if x_kwarg is None:
            return model(*x)
        elif x is None:
            return model(**x_kwarg)
        else:
            raise AssertionError(
                "x and x_kwarg cannot be set at same time for model_forward_helper."
            )

    def checkQuantizeTrace(
        self,
        model,
        x=None,
        atol=1e-3,
        rtol=1e-2,
        x_var=None,
        qconfig=default_static_qconfig,
        int8_bf16=False,
        freeze=True,
        x_kwarg=None,
        expect_result=None,
    ):
        """Quantize+trace *model* and compare the INT8 graph's output with
        the fp32 reference (or *expect_result* when given); optionally run
        *x_var* to exercise the shape-change fallback.  Returns the graph."""
        if x is None and x_kwarg is None:
            raise AssertionError(
                "x and x_kwarg cannot be none at same time for checkQuantizeTrace."
            )
        elif x is not None and x_kwarg is not None:
            raise AssertionError(
                "x and x_kwarg cannot be set at same time for checkQuantizeTrace."
            )
        graph, traced_model, fp32_model = self.prepareModel(
            model, x, qconfig, int8_bf16, freeze=freeze, x_kwarg=x_kwarg
        )
        with torch.no_grad():
            y = self.model_forward_helper(fp32_model, x, x_kwarg)
            # For int8+bf16 the reference is downcast to bf16 for comparison.
            y = y.to(torch.bfloat16) if int8_bf16 else y
            expect = expect_result if expect_result is not None else y
            y_llga = self.model_forward_helper(traced_model, x, x_kwarg)
            self.assertEqual(expect, y_llga, atol=atol, rtol=rtol)
            # test Fallback when input shape changes:
            if x_var:
                assert x_kwarg is None, "x_kwarg input doesn't suppport use with x_var"
                y_var = fp32_model(*x_var)
                y_var = y_var.to(torch.bfloat16) if int8_bf16 else y_var
                y_var_llga = traced_model(*x_var)
                self.assertEqual(y_var, y_var_llga, atol=atol, rtol=rtol)
            return graph

    def prepareModel(
        self,
        model,
        x,
        qconfig=default_static_qconfig,
        int8_bf16=False,
        prepare_inplace=True,
        convert_inplace=True,
        freeze=True,
        x_kwarg=None,
    ):
        """Run the ipex static-quantization flow (prepare -> calibrate ->
        convert -> trace -> optionally freeze) and return
        (graph, traced_model, fp32_model)."""
        model.eval()
        # Keep an untouched copy as the fp32 reference.
        fp32_model = copy.deepcopy(model)
        with torch.no_grad(), torch._jit_internal._disable_emit_hooks():
            ipex.nn.utils._model_convert.replace_dropout_with_identity(model)
            model = ipex.quantization.prepare(
                model, qconfig, x, inplace=prepare_inplace, example_kwarg_inputs=x_kwarg
            )
            # do calibration (result intentionally unused)
            y = self.model_forward_helper(model, x, x_kwarg)

            # jit trace to insert quant/dequant
            def jit_trace_helper(convert_model, x, x_kwarg):
                if x_kwarg is None:
                    return torch.jit.trace(convert_model, x)
                elif x is None:
                    return torch.jit.trace(convert_model, example_kwarg_inputs=x_kwarg)
                else:
                    raise AssertionError(
                        "Can't set x and x_kwarg at same time for jit trace."
                    )

            if int8_bf16:
                # Tracing under autocast gives the mixed int8+bf16 graph.
                with torch.cpu.amp.autocast():
                    convert_model = ipex.quantization.convert(
                        model, inplace=convert_inplace
                    )
                    traced_model = jit_trace_helper(convert_model, x, x_kwarg)
            else:
                convert_model = ipex.quantization.convert(
                    model, inplace=convert_inplace
                )
                traced_model = jit_trace_helper(convert_model, x, x_kwarg)
            if freeze:
                traced_model = torch.jit.freeze(traced_model)
            # warm up run (result intentionally unused)
            y0 = self.model_forward_helper(traced_model, x, x_kwarg)
            # get the graph at the second run after freezing
            if x_kwarg is None:
                graph = traced_model.graph_for(*x)
            elif x is None:
                graph = traced_model.graph_for(**x_kwarg)
            else:
                raise AssertionError("Can't set x and x_kwarg at same time")
            return graph, traced_model, fp32_model

    def checkPatterns(self, graph, patterns):
        """Assert there is one fusion group per entry of *patterns* and
        that each group contains all of its expected sub-patterns."""
        fusion_groups = findFusionGroups(graph)
        assert len(fusion_groups) == len(
            patterns
        ), "length of subgraphs not equal to length of given patterns"
        for i in range(len(fusion_groups)):
            for pattern in patterns[i]:
                self.assertGraphContains(fusion_groups[i], pattern)

    def checkAttr(self, graph, node, attr):
        """Assert that no node of kind *node* anywhere in *graph*
        (including sub-blocks) carries the attribute *attr*."""

        def count(block, node, attr):
            for n in block.nodes():
                if n.kind() == node:
                    # Fix: consult the requested attribute instead of the
                    # hard-coded "qtype".
                    self.assertFalse(n.hasAttribute(attr))
                for block in n.blocks():
                    count(block, node, attr)

        count(graph, node, attr)
| 8,943 | 33.007605 | 88 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/verbose.py | import argparse
import torch
import intel_extension_for_pytorch as ipex
class Module(torch.nn.Module):
    """Tiny single-conv model (1 -> 10 channels, 5x5 kernel, stride 1)
    used to exercise the ipex verbose output."""

    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(1, 10, 5, 1)

    def forward(self, x):
        return self.conv(x)
def run_model(level):
    """Run one forward pass of the demo model under ipex verbosity *level*."""
    model = ipex.optimize(Module().eval(), dtype=torch.float32, level="O1")
    data = torch.rand(1, 1, 112, 112)
    with ipex.verbose(level):
        model(data)
if __name__ == "__main__":
    # CLI entry: --verbose-level selects the ipex.verbose level (default 0).
    parser = argparse.ArgumentParser()
    parser.add_argument("--verbose-level", default=0, type=int)
    args = parser.parse_args()
    run_model(args.verbose_level)
| 667 | 22.034483 | 63 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/profile_ipex_op.py | import argparse
import torch
import torch.nn as nn
import intel_extension_for_pytorch as ipex
def trace_handler(prof):
    """torch.profiler ``on_trace_ready`` callback: print the full averages
    table sorted by self CPU time."""
    table = prof.key_averages().table(sort_by="self_cpu_time_total", row_limit=-1)
    print(table)
class inplace_softmax(torch.nn.Module):
    """Adds 1 to the input, then applies softmax over the last dim; the
    softmax input is a fresh temporary, so it may be done in place."""

    def forward(self, x):
        shifted = x + 1
        return nn.Softmax(dim=-1)(shifted)
def run_profile(model, x):
    """Trace *model*, warm it up, and exercise the two profiler usecases
    described in the PyTorch profiler documentation."""
    with torch.no_grad():
        traced = torch.jit.trace(model, x)
        for _ in range(10):
            res = traced(x)
        # usecase 1: plain CPU profiling, print the summary afterwards
        with torch.profiler.profile(
            activities=[torch.profiler.ProfilerActivity.CPU]
        ) as prof1:
            for _ in range(10):
                res = traced(x)
        print(prof1.key_averages().table(sort_by="self_cpu_time_total", row_limit=-1))
        # usecase 2: scheduled profiling with an on_trace_ready callback
        with torch.profiler.profile(
            activities=[torch.profiler.ProfilerActivity.CPU],
            schedule=torch.profiler.schedule(wait=2, warmup=3, active=5),
            on_trace_ready=trace_handler,
        ) as prof2:
            for _ in range(10):
                res = traced(x)
                prof2.step()
def run_model(llga):
    # Run the profiling demo, optionally with the LLGA fp32/bf16 fuser
    # enabled; the previous LLGA state is restored afterwards.
    llga_enabled = ipex._C.is_llga_fp32_bf16_enabled()
    if llga:
        ipex._C.set_llga_fp32_bf16_enabled(True)
    x = torch.tensor([[1.0, 1.0], [1.0, 1.0]])
    model = inplace_softmax().eval()
    run_profile(model, x)
    ipex._C.set_llga_fp32_bf16_enabled(llga_enabled)
if __name__ == "__main__":
    # CLI entry: --llga switches the fp32 path onto the LLGA fuser.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--llga", action="store_true", help="use llga for fp32 path", default=False
    )
    args = parser.parse_args()
    run_model(args.llga)
| 1,769 | 24.652174 | 83 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_softmax.py | import torch
import torch.nn as nn
import intel_extension_for_pytorch as ipex
from torch.testing._internal.jit_utils import JitTestCase
from intel_extension_for_pytorch.quantization import prepare, convert
from torch.ao.quantization import MinMaxObserver, PerChannelMinMaxObserver, QConfig
import unittest
# Node kinds asserted on in the traced graphs: ipex replaces aten::softmax
# with its own out-of-place or in-place softmax op.
IPEX_SOFTMAX = "ipex::softmax"
IPEX_SOFTMAX_ = "ipex::softmax_"
ATEN_SOFTMAX = "aten::softmax"
class softmax_with_multiuse_input(torch.nn.Module):
    """Softmax whose input is also consumed by a later add, giving the
    input multiple uses so the softmax must not run in place."""

    def forward(self, x):
        probs = nn.Softmax(dim=-1)(x)
        return probs, x + probs
class softmax_with_alias_input(torch.nn.Module):
    """Returns the raw input alongside its softmax; the alias keeps the
    input alive, so the softmax must not run in place."""

    def forward(self, x):
        return x, nn.Softmax(dim=-1)(x)
class inplace_softmax(torch.nn.Module):
    """The softmax consumes a fresh temporary (x + 1), so the op is
    eligible for replacement by the in-place ipex::softmax_."""

    def forward(self, x):
        return nn.Softmax(dim=-1)(x + 1)
class inplace_softmax_with_blocks(torch.nn.Module):
    """In-place-eligible softmax whose temporary is produced inside
    scripted if/else blocks selected by *flag*."""

    def forward(self, x, flag):
        shifted = x + 1 if flag else x + 3
        return torch.softmax(shifted, dim=-1)
class softmax_MHA(torch.nn.Module):
    """Scaled dot-product attention-score pattern: QK^T / 64 followed by a
    softmax over the last dim."""

    def forward(self, x):
        scores = torch.matmul(x, x.transpose(-1, -2)) / 64
        return nn.Softmax(dim=-1)(scores)
class inplace_softmax_with_TE_group(torch.nn.Module):
    """Two independent softmaxes fed by a TensorExpr-fusible group of
    pointwise ops over the same input."""

    def forward(self, x):
        a, b, c, d, e = (x + k for k in (1, 2, 3, 4, 5))
        y1 = (a / b).softmax(dim=-1)
        y2 = ((d - c) / e).softmax(dim=-1)
        return y1, y2
class softmax_dtype(torch.nn.Module):
    """Softmax along dim 1 with an explicit dtype equal to the input's,
    exercising the dtype-argument variant of the op."""

    def forward(self, x):
        return nn.functional.softmax(x, dim=1, dtype=x.dtype)
class inplace_softmax_dtype(torch.nn.Module):
    """Same as softmax_dtype but fed a temporary (x + 1), making the op
    eligible for in-place replacement."""

    def forward(self, x):
        return nn.functional.softmax(x + 1, dim=1, dtype=x.dtype)
class SoftmaxTester(JitTestCase):
    """Verifies that the ipex JIT pass replaces ``aten::softmax`` with the
    out-of-place (``ipex::softmax``) or in-place (``ipex::softmax_``) variant
    exactly when legal, and that every variant produces identical values.
    """

    def test_softmax(self):
        for dtype in ["fp32", "bf16"]:
            # Constant rows make every softmax result uniform, so comparing
            # outputs across variants is trivial.
            test1 = torch.tensor([[2.0, 2.0], [2.0, 2.0]])
            test2 = torch.tensor([[2.0, 2.0], [2.0, 2.0]])
            test3 = torch.tensor([[1.0, 1.0], [1.0, 1.0]])
            test4 = torch.tensor([[1.0, 1.0], [1.0, 1.0]]).transpose(1, 0)
            test5 = torch.tensor([[2.0, 2.0], [2.0, 2.0]]).transpose(1, 0)
            test6 = torch.rand(1, 16, 64, 64)
            test7 = torch.tensor([[1.0, 1.0], [1.0, 1.0]])
            test8 = torch.rand(2, 3)
            test9 = test8 - 1
            test10 = torch.tensor([[1.0, 1.0], [1.0, 1.0]])
            if dtype == "bf16":
                # test6 intentionally stays fp32: it feeds the int8 path below.
                test1 = test1.bfloat16()
                test2 = test2.bfloat16()
                test3 = test3.bfloat16()
                test4 = test4.bfloat16()
                test5 = test5.bfloat16()
                test7 = test7.bfloat16()
                test8 = test8.bfloat16()
                test9 = test9.bfloat16()
                test10 = test10.bfloat16()
            model1 = softmax_with_multiuse_input().eval()
            model2 = softmax_with_alias_input().eval()
            model3 = inplace_softmax().eval()
            model4 = inplace_softmax().eval()
            model5 = softmax_with_multiuse_input().eval()
            model6 = softmax_MHA().eval()
            model7 = inplace_softmax_with_blocks().eval()
            model8 = softmax_dtype().eval()
            model9 = inplace_softmax_dtype().eval()
            model10 = inplace_softmax_with_TE_group().eval()
            with torch.no_grad():
                model1 = torch.jit.trace(model1, test1)
                res1 = model1(test1)
                model2 = torch.jit.trace(model2, test2)
                res2 = model2(test2)
                model3 = torch.jit.trace(model3, test3)
                res3 = model3(test3)
                model4 = torch.jit.trace(model4, test4)
                res4 = model4(test4)
                model5 = torch.jit.trace(model5, test5)
                res5 = model5(test5)
                # model7 contains data-dependent control flow, so it must be
                # scripted rather than traced.
                model7 = torch.jit.script(model7)
                res7 = model7(test7, torch.BoolTensor([True]))
                model8 = torch.jit.trace(model8, test8)
                res8 = model8(test8)
                model9 = torch.jit.trace(model9, test9)
                res9 = model9(test9)
                model10_traced = torch.jit.trace(model10, test10)
                res10_traced = model10_traced(test10)
                res10 = model10(test10)
            # int8 case, testing inplace softmax with an LLGA fusion group
            qconfig = QConfig(
                activation=MinMaxObserver.with_args(
                    qscheme=torch.per_tensor_affine, dtype=torch.quint8
                ),
                weight=PerChannelMinMaxObserver.with_args(
                    dtype=torch.qint8, qscheme=torch.per_channel_symmetric
                ),
            )
            ipex.nn.utils._model_convert.replace_dropout_with_identity(model6)
            prepared_model = prepare(
                model6, qconfig, example_inputs=test6, inplace=False
            )
            prepared_model(test6)
            converted_model = convert(prepared_model)
            converted_model = torch.jit.trace(converted_model, test6)
            converted_model = torch.jit.freeze(converted_model)
            # Two warm-up runs so the frozen graph is fully optimized before
            # its graph is inspected below.
            with torch.no_grad():
                res6 = converted_model(test6)
                res6 = converted_model(test6)
            with torch.no_grad():
                res6 = converted_model(test6)
            res6_ori = model6(test6)
            # should be outplace since multi-use
            graph1 = model1.graph_for(test1)
            self.assertGraphContainsExactly(graph1, IPEX_SOFTMAX, 1)
            # should be outplace since alias
            graph2 = model2.graph_for(test2)
            self.assertGraphContainsExactly(graph2, IPEX_SOFTMAX, 1)
            # should be inplace
            graph3 = model3.graph_for(test3)
            self.assertGraphContainsExactly(graph3, IPEX_SOFTMAX_, 1)
            # inplace test, but should be aten::softmax due to non-contiguous input
            graph4 = model4.graph_for(test4)
            self.assertGraphContainsExactly(graph4, ATEN_SOFTMAX, 1)
            # outplace test, but should be aten::softmax due to non-contiguous input
            graph5 = model5.graph_for(test5)
            self.assertGraphContainsExactly(graph5, ATEN_SOFTMAX, 1)
            # should be inplace and pass the checking in llga fusion group
            # NOTE(review): graph_for is queried with test3 although the model
            # was traced with test6 — presumably intentional to fetch the
            # cached optimized graph, but verify it should not be test6.
            graph6 = converted_model.graph_for(test3)
            self.assertGraphContainsExactly(graph6, IPEX_SOFTMAX_, 1)
            # should be inplace
            graph7 = model7.graph_for(test7, torch.BoolTensor([True]))
            self.assertGraphContainsExactly(graph7, IPEX_SOFTMAX_, 1)
            # should be outplace
            graph8 = model8.graph_for(test8)
            self.assertGraphContainsExactly(graph8, IPEX_SOFTMAX, 1)
            # should be inplace
            graph9 = model9.graph_for(test9)
            self.assertGraphContainsExactly(graph9, IPEX_SOFTMAX_, 1)
            # should be inplace
            graph10 = model10_traced.graph_for(test10)
            self.assertGraphContainsExactly(graph10, IPEX_SOFTMAX_, 2)
            # the output results of above inplace/outplace softmax should be the same
            self.assertEqual(res1[0], res2[1], 0)
            self.assertEqual(res1[0], res3, 0)
            self.assertEqual(res1[0], res4, 0)
            self.assertEqual(res1[0], res5[0], 0)
            self.assertEqual(res1[0], res7, 0)
            self.assertEqual(res8, res9, 0)
            self.assertEqual(res10[0], res10_traced[0], 0)
            self.assertEqual(res10[1], res10_traced[1], 0)
if __name__ == "__main__":
    # Run the softmax JIT tests when this file is executed directly.
    test = unittest.main()
| 8,197 | 35.274336 | 85 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/bench/custom_op_bench/interaction.py | import torch
import intel_extension_for_pytorch as ipex
from torch.utils import ThroughputBenchmark
import argparse
class Interaction(torch.nn.Module):
    """Thin module wrapper around the fused ipex interaction op so it can be
    jit-traced and benchmarked like any other module."""

    def __init__(self):
        super(Interaction, self).__init__()

    def forward(self, x):
        # `x` is a sequence of feature tensors; the fused op takes them unpacked.
        return ipex.nn.functional.interaction(*x)
def inference_benchmark(num_instance, interact_module, dtype):
    """Measure multi-threaded inference throughput of the interaction op.

    Traces the module once under no_grad, then drives it with
    ThroughputBenchmark using `num_instance` calling threads.
    """
    inputs = []
    for i in range(0, 27):
        # 27 feature tensors of shape [batch=128, vector=128].
        inputs.append(torch.randn([128, 128]).to(dtype))
    with torch.no_grad():
        interact_module = torch.jit.trace(interact_module, [inputs], check_trace=False)
        bench = ThroughputBenchmark(interact_module)
        bench.add_input(inputs)
        stats = bench.benchmark(
            num_calling_threads=num_instance,
            num_warmup_iters=100,
            num_iters=1000 * num_instance,
        )
    print(stats)
def training_benchmark(interact_module, dtype):
    """Time 1000 forward+backward passes of the interaction op (single thread)."""
    import time

    inputs = []
    for i in range(0, 27):
        inputs.append(torch.randn([4096, 128]).to(dtype).requires_grad_())
    # warmup
    for _ in range(100):
        y = interact_module.forward(inputs).sum()
        y.backward()
    startT = time.time()
    for _ in range(1000):
        y = interact_module.forward(inputs).sum()
        y.backward()
    endT = time.time()
    avg_elapsed = endT - startT
    # NOTE(review): `avg_elapsed` is the *total* wall time in seconds for the
    # 1000 iterations, not a per-iteration average in ms as the message implies.
    print("Took {} ms on average to run {} FW+BW".format(avg_elapsed, "interaction"))
def run():
    """CLI entry point: benchmark ipex interaction in inference or training mode."""
    parser = argparse.ArgumentParser(description="benchmark for ipex interaction")
    parser.add_argument("--num-instance", type=int, default=1)
    parser.add_argument("--bf16", action="store_true", default=False)
    parser.add_argument("--inference", action="store_true", default=False)
    args = parser.parse_args()
    dtype = torch.bfloat16 if args.bf16 else torch.float32
    module = Interaction()
    if args.inference:
        inference_benchmark(args.num_instance, module, dtype)
    else:
        training_benchmark(module, dtype)
if __name__ == "__main__":
    # Allow running the benchmark directly from the command line.
    run()
| 2,028 | 29.283582 | 87 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/bench/custom_op_bench/merged_embeddingbag.py | import torch
import intel_extension_for_pytorch as ipex
import time
import copy
r"""
vector-size = 128
batch-size = 7168
r"""
# Two large (~256 MB each) float buffers used only to thrash the CPU caches
# between benchmark iterations.
a = torch.ones(256 * 1024 * 1024 // 4, dtype=torch.float)
b = torch.ones(256 * 1024 * 1024 // 4, dtype=torch.float)


def cache_flush():
    """Evict benchmark data from the CPU caches by streaming through ~512 MB."""
    # We assume the cache size is <= 512MB here.
    # a = torch.ones(256 * 1024 * 1024 // 4, dtype=torch.float)
    # b = torch.ones(256 * 1024 * 1024 // 4, dtype=torch.float)
    # a, b are initialized out of this function to avoid allocate memory every time
    global a, b
    a += b
class EmbeddingBagList(torch.nn.Module):
    """A list of per-table nn.EmbeddingBag modules (sum mode, sparse grads),
    used as the non-fused baseline for the merged embedding benchmark."""

    def __init__(self, max_rows, vector_size):
        super(EmbeddingBagList, self).__init__()
        self.emb_list = torch.nn.ModuleList(
            torch.nn.EmbeddingBag(rows, vector_size, mode="sum", sparse=True)
            for rows in max_rows
        )

    def forward(self, indices, offsets):
        # Look up each table with its own (indices, offsets) pair.
        return [
            table(idx, offs)
            for table, idx, offs in zip(self.emb_list, indices, offsets)
        ]
def run_bench(bench_name, module, input_data, optimizer=None, training=False):
    """Time `module(*input_data)` with cache flushes between iterations.

    Warms up first, then times `iters` calls, subtracting the flush (and, for
    training, the loss-summation) time from the reported total. For the plain
    EmbeddingBagList the dense optimizer step is included; the merged module
    applies SGD internally during backward.
    """
    iters = 100 if training else 1000
    for i in range(iters):
        cache_flush()
        outs = module(*input_data)
        if training:
            loss = 0
            for out in outs:
                loss += out.sum()
            loss.backward()
            if isinstance(module, EmbeddingBagList):
                optimizer.step()
                optimizer.zero_grad(set_to_none=True)
    start = time.time()
    exclude_time = 0
    for i in range(iters):
        flush_start = time.time()
        cache_flush()
        exclude_time += time.time() - flush_start
        outs = module(*input_data)
        if training:
            loss = 0
            sum_start = time.time()
            for out in outs:
                loss += out.sum()
            exclude_time += time.time() - sum_start
            loss.backward()
            if isinstance(module, EmbeddingBagList):
                optimizer.step()
                optimizer.zero_grad(set_to_none=True)
    end = time.time()
    avg_elapsed = end - start - exclude_time
    # NOTE(review): this is the total wall time (seconds) over all iterations,
    # despite the "ms on average" wording in the message.
    print("Took {} ms on average to run {} benchmark".format(avg_elapsed, bench_name))
def inference_bench(dataset, emb_list, merged_emb):
    """Run the inference benchmark for both the EmbeddingBag list baseline
    and the merged embedding module."""
    list_input, merged_input = dataset
    run_bench("EmbedddingBag List Inference", emb_list, list_input)
    run_bench("Merged EmbedddingBag Inference", merged_emb, merged_input)
def training_bench(dataset, emb_list, merged_emb, optimizer):
    """Run the training benchmark for both the EmbeddingBag list baseline
    (with its dense optimizer) and the merged embedding module."""
    list_input, merged_input = dataset
    run_bench(
        "EmbedddingBag List Training",
        emb_list,
        list_input,
        optimizer=optimizer,
        training=True,
    )
    run_bench(
        "Merged EmbedddingBag Training", merged_emb, merged_input, training=True
    )
def get_data(distribution, merged_emb, max_rows, batch_size):
    """Synthesize per-table (indices, offsets) plus the merged-embedding input.

    "balance" spreads lookups evenly over each table's rows; "unbalance" sends
    every odd-position lookup to row 0 to create a hot row.
    """
    indices = []
    offsets = []
    include_last = [False] * len(max_rows)
    for rows in max_rows:
        idx = torch.empty(batch_size, dtype=torch.int64)
        if batch_size <= rows:
            # Stride through the table so lookups are spaced out.
            stride = int(rows / batch_size)
            for pos in range(batch_size):
                idx[pos] = (
                    pos * stride if (distribution == "balance" or pos % 2 == 0) else 0
                )
        else:
            # More lookups than rows: wrap around with modulo.
            for pos in range(batch_size):
                idx[pos] = (
                    pos % rows if (distribution == "balance" or pos % 2 == 0) else 0
                )
        indices.append(idx)
        offsets.append(torch.arange(batch_size))
    merged_input = merged_emb.linearize_indices_and_offsets(
        indices, offsets, include_last
    )
    return (indices, offsets), (merged_input, torch.BoolTensor([False]))
def run():
    """CLI entry point: build both embedding variants and benchmark them."""
    import argparse

    parser = argparse.ArgumentParser(description="benchmark for ipex embeddingbag")
    parser.add_argument(
        "--data-distribution", type=str, choices=["balance", "unbalance"]
    )
    parser.add_argument("--inference", action="store_true", default=False)
    parser.add_argument("--batch-size", type=int, default=7168)
    parser.add_argument("--vector-size", type=int, default=128)
    args = parser.parse_args()
    # 26 tables, each with batch-size rows.
    max_rows = [args.batch_size for i in range(26)]
    emb_list = EmbeddingBagList(max_rows, args.vector_size)
    sgd = torch.optim.SGD(emb_list.parameters(), lr=0.01)
    emb_list, sgd = ipex.optimize(model=emb_list, optimizer=sgd, dtype=torch.float)
    # Deep-copy so the merged module owns independent weights.
    merged_emb = ipex.nn.modules.MergedEmbeddingBagWithSGD.from_embeddingbag_list(
        copy.deepcopy(emb_list.emb_list)
    )
    input_data = get_data(args.data_distribution, merged_emb, max_rows, args.batch_size)
    if args.inference:
        inference_bench(input_data, emb_list, merged_emb)
    else:
        training_bench(input_data, emb_list, merged_emb, sgd)
if __name__ == "__main__":
    # Allow running the benchmark directly from the command line.
    run()
| 5,123 | 31.636943 | 88 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/bench/custom_op_bench/optimizer.py | import torch
import time
import math
# Two large (~256 MB each) float buffers used only to thrash the CPU caches
# between timed optimizer updates.
a = torch.ones(256 * 1024 * 1024 // 4, dtype=torch.float)
b = torch.ones(256 * 1024 * 1024 // 4, dtype=torch.float)


def flush():
    """Evict benchmark data from the CPU caches by streaming through ~512 MB."""
    global a, b
    a += b
def non_fused_sgd(
    param, grad, momentum_buf, momentum, lr, weight_decay, dampening, nesterov
):
    """Reference (eager, non-fused) SGD step, mutating `param` in place.

    Mirrors torch.optim.SGD semantics: optional weight decay, momentum with
    dampening, and Nesterov lookahead.
    """
    d_p = grad
    if weight_decay != 0:
        d_p = d_p.add(param, alpha=weight_decay)
    if momentum != 0:
        if momentum_buf is None:
            # First step: initialize the buffer from the (decayed) gradient.
            buf = torch.clone(d_p).detach()
        else:
            buf = momentum_buf
            buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
        d_p = d_p.add(buf, alpha=momentum) if nesterov else buf
    param.add_(d_p, alpha=-lr)
def non_fused_lamb(
    param, exp_avg, exp_avg_sq, grad, step, beta1, beta2, lr, weight_decay, eps
):
    """Reference (eager, non-fused) LAMB step, mutating `param` in place.

    Updates the two moment estimates, builds a bias-corrected Adam-style
    step, then scales it by the layer-wise trust ratio ||w|| / ||update||.
    """
    correction1 = 1 - beta1**step
    correction2 = 1 - beta2**step
    # Decay the first and second moment running average coefficient.
    exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
    exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
    update = (exp_avg / correction1) / ((exp_avg_sq / correction2).sqrt() + eps)
    if weight_decay != 0:
        update.add_(param, alpha=weight_decay)
    trust_ratio = param.norm(p=2) / update.norm(p=2)
    param.add_(update, alpha=-lr * trust_ratio)
def non_fused_adagrad(param, grad, state_sum, step, lr, weight_decay, lr_decay, eps):
    """Reference (eager, non-fused) Adagrad step, mutating `param` in place.

    Accumulates squared gradients into `state_sum` and applies the
    lr-decay-adjusted learning rate.
    """
    if weight_decay != 0:
        grad = grad.add(param, alpha=weight_decay)
    clr = lr / (1 + (step - 1) * lr_decay)
    state_sum.addcmul_(grad, grad, value=1)
    param.addcdiv_(grad, state_sum.sqrt().add_(eps), value=-clr)
def non_fused_adam(
    param,
    exp_avg,
    exp_avg_sq,
    max_exp_avg_sq,
    grad,
    amsgrad,
    step,
    beta1,
    beta2,
    lr,
    weight_decay,
    eps,
):
    """Reference (eager, non-fused) Adam/AMSGrad step, mutating `param` in place."""
    correction1 = 1 - beta1**step
    correction2 = 1 - beta2**step
    if weight_decay != 0:
        grad = grad.add(param, alpha=weight_decay)
    # Decay the first and second moment running average coefficient.
    exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
    exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj(), value=1 - beta2)
    if amsgrad:
        # Track the elementwise maximum of the second moment and normalize
        # with it (AMSGrad variant).
        torch.maximum(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
        denom = (max_exp_avg_sq.sqrt() / math.sqrt(correction2)).add_(eps)
    else:
        denom = (exp_avg_sq.sqrt() / math.sqrt(correction2)).add_(eps)
    param.addcdiv_(exp_avg, denom, value=-(lr / correction1))
def run_bench(bench_name, func, *params):
    """Time 1000 calls of `func(*params)` after a 1000-call warmup, flushing
    the caches before each call and subtracting the flush time."""
    for _ in range(1000):
        flush()
        func(*params)
    start = time.time()
    flush_time = 0
    for i in range(1000):
        flush_start = time.time()
        flush()
        flush_time += time.time() - flush_start
        func(*params)
    end = time.time()
    avg_elapsed = end - start - flush_time
    # NOTE(review): this is the total wall time (seconds) over 1000 calls,
    # despite the "ms on average" wording in the message.
    print("Took {} ms on average to run {} update".format(avg_elapsed, bench_name))
def sgd_bench():
    """Benchmark fused, split-fused (bf16 + fp32 trail) and non-fused SGD
    updates over three parameter sizes."""
    print("Running benchmark for SGD update step")
    fused = torch.ops.torch_ipex.sgd_fused_step
    learning_rate, weight_decay, momentum, dampening, nesterov = 0.1, 0.3, 0.5, 0.5, True
    for param_size in (1024, 512 * 1024, 8 * 1024 * 1024):
        param = torch.randn(param_size)
        grad = torch.randn(param_size)
        momentum_buf = torch.randn(param_size)
        dummy_trail = torch.Tensor()  # fp32 path keeps no bf16 trail
        trail = torch.randn(param_size).bfloat16()
        print("For parameter size", param_size)
        hyper = (momentum, learning_rate, weight_decay, dampening, nesterov)
        run_bench("fused sgd", fused, param, grad, momentum_buf, dummy_trail, *hyper)
        run_bench(
            "fused split sgd",
            fused,
            param.bfloat16(),
            grad.bfloat16(),
            momentum_buf,
            trail,
            *hyper,
        )
        run_bench("non fused sgd", non_fused_sgd, param, grad, momentum_buf, *hyper)
def lamb_bench():
    """Benchmark fused, split-fused (bf16 + fp32 trail) and non-fused LAMB
    updates over three parameter sizes."""
    print("Running benchmark for Lamb update step")
    fused = torch.ops.torch_ipex.lamb_fused_step
    step, beta1, beta2 = 10, 0.8, 0.9
    learning_rate, weight_decay, eps = 0.1, 0.3, 0.001
    for param_size in (1024, 512 * 1024, 8 * 1024 * 1024):
        param = torch.randn(param_size)
        grad = torch.randn(param_size)
        exp_avg = torch.randn(param_size).abs()
        exp_avg_sq = torch.randn(param_size).abs()
        dummy_trail = torch.Tensor()  # fp32 path keeps no bf16 trail
        trail = torch.randn(param_size).bfloat16()
        print("For parameter size", param_size)
        hyper = (step, beta1, beta2, learning_rate, weight_decay, eps)
        run_bench(
            "fused lamb", fused, param, exp_avg, exp_avg_sq, grad, dummy_trail, *hyper
        )
        run_bench(
            "fused split lamb",
            fused,
            param.bfloat16(),
            exp_avg,
            exp_avg_sq,
            grad.bfloat16(),
            trail,
            *hyper,
        )
        run_bench(
            "non fused lamb", non_fused_lamb, param, exp_avg, exp_avg_sq, grad, *hyper
        )
def adagrad_bench():
    """Benchmark fused, split-fused (bf16 + fp32 trail) and non-fused Adagrad
    updates over three parameter sizes."""
    print("Running benchmark for Adagrad update step")
    fused = torch.ops.torch_ipex.adagrad_fused_step
    step, learning_rate, weight_decay, lr_decay, eps = 10, 0.1, 0.3, 0.01, 0.001
    for param_size in (1024, 512 * 1024, 8 * 1024 * 1024):
        param = torch.randn(param_size)
        grad = torch.randn(param_size)
        state_sum = torch.randn(param_size)
        dummy_trail = torch.Tensor()  # fp32 path keeps no bf16 trail
        trail = torch.randn(param_size).bfloat16()
        print("For parameter size", param_size)
        hyper = (step, learning_rate, weight_decay, lr_decay, eps)
        run_bench("fused adagrad", fused, param, grad, state_sum, dummy_trail, *hyper)
        run_bench(
            "fused split adagrad",
            fused,
            param.bfloat16(),
            grad.bfloat16(),
            state_sum,
            trail,
            *hyper,
        )
        run_bench("non fused adagrad", non_fused_adagrad, param, grad, state_sum, *hyper)
def adam_bench():
    """Benchmark fused, split-fused (bf16 + fp32 trail) and non-fused Adam
    (AMSGrad variant) updates over three parameter sizes."""
    print("Running benchmark for Adam update step")
    fused = torch.ops.torch_ipex.adam_fused_step
    step, beta1, beta2 = 10, 0.8, 0.9
    learning_rate, weight_decay, eps, amsgrad = 0.1, 0.3, 0.001, True
    for param_size in (1024, 512 * 1024, 8 * 1024 * 1024):
        param = torch.randn(param_size)
        grad = torch.randn(param_size)
        exp_avg = torch.randn(param_size).abs()
        exp_avg_sq = torch.randn(param_size).abs()
        max_exp_avg_sq = torch.randn(param_size).abs()
        dummy_trail = torch.Tensor()  # fp32 path keeps no bf16 trail
        trail = torch.randn(param_size).bfloat16()
        print("For parameter size", param_size)
        hyper = (amsgrad, step, beta1, beta2, learning_rate, weight_decay, eps)
        run_bench(
            "fused Adam",
            fused,
            param,
            exp_avg,
            exp_avg_sq,
            max_exp_avg_sq,
            grad,
            dummy_trail,
            *hyper,
        )
        run_bench(
            "fused split Adam",
            fused,
            param.bfloat16(),
            exp_avg,
            exp_avg_sq,
            max_exp_avg_sq,
            grad.bfloat16(),
            trail,
            *hyper,
        )
        run_bench(
            "non fused Adam",
            non_fused_adam,
            param,
            exp_avg,
            exp_avg_sq,
            max_exp_avg_sq,
            grad,
            *hyper,
        )
def run():
    """CLI entry point: dispatch to the selected optimizer benchmark."""
    import argparse

    parser = argparse.ArgumentParser(description="benchmark for ipex optimizer")
    parser.add_argument(
        "--optimizer",
        type=str,
        choices=["sgd", "lamb", "adagrad", "adam"],
        default="sgd",
    )
    args = parser.parse_args()
    dispatch = {
        "sgd": sgd_bench,
        "lamb": lamb_bench,
        "adagrad": adagrad_bench,
        "adam": adam_bench,
    }
    dispatch[args.optimizer]()
if __name__ == "__main__":
    # Allow running the benchmark directly from the command line.
    run()
| 9,897 | 23.93199 | 85 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/utils/utils.py | import torch
import unittest
from torch.testing._internal import expecttest
from functools import wraps
import torch_ipex as ipex
class VerboseTestCase(expecttest.TestCase):
    """TestCase helpers for parsing oneDNN (dnnl_verbose) log lines and
    classifying reorder primitives by what they change (dtype vs. format).

    A verbose line has 11 comma-separated fields; field 3 is the primitive
    kind and field 6 holds "src_desc dst_desc", each formatted as
    "<dtype-...>::<format>".
    """

    def __init__(self, method_name="runTest"):
        # BUG FIX: the original called super(expecttest.TestCase, self),
        # which skips expecttest.TestCase in the MRO; use plain super().
        super().__init__(method_name)

    def is_dnnl_verbose(self, line):
        """Return True if `line` is a well-formed dnnl_verbose record."""
        tokens = line.strip().split(",")
        return tokens[0] == "dnnl_verbose" and len(tokens) == 11

    def is_dnnl_reorder(self, line):
        """Return True if the verbose record describes a reorder primitive."""
        assert self.is_dnnl_verbose(line)
        return line.strip().split(",")[3] == "reorder"

    def get_reorder_info(self, line):
        """Parse (src_dtype, src_format, dst_dtype, dst_format) from a reorder line."""
        assert self.is_dnnl_reorder(line)
        tokens = line.split(",")
        src_desc, dst_desc = tokens[6].split(" ")
        src_dtype = src_desc.split("::")[0].split("-")
        src_format = src_desc.split("::")[1]
        dst_dtype = dst_desc.split("::")[0].split("-")
        dst_format = dst_desc.split("::")[1]
        return src_dtype, src_format, dst_dtype, dst_format

    def ReorderForPack(self, line):
        """True for a reorder that only repacks layout (same dtype both sides)."""
        if not self.is_dnnl_reorder(line):
            return False
        src_dtype, src_format, dst_dtype, dst_format = self.get_reorder_info(line)
        return src_dtype == dst_dtype

    def OnlyReorderDtype(self, line):
        """True for a reorder that changes dtype but keeps the memory format."""
        if not self.is_dnnl_reorder(line):
            return False
        src_dtype, src_format, dst_dtype, dst_format = self.get_reorder_info(line)
        # BUG FIX: the original compared src_format against dst_dtype.
        return src_dtype != dst_dtype and src_format == dst_format

    def OnlyReorderFormat(self, line):
        """True for a reorder that changes memory format but keeps the dtype."""
        if not self.is_dnnl_reorder(line):
            return False
        src_dtype, src_format, dst_dtype, dst_format = self.get_reorder_info(line)
        # BUG FIX: the original compared src_format against dst_dtype.
        return src_dtype == dst_dtype and src_format != dst_format

    def assertOnlyReorderDtype(self, line):
        # BUG FIX: the original called OnlyReorderDtype as a bare global
        # (NameError at runtime); call it through self.
        assert self.OnlyReorderDtype(line), "the verbose msg shows not only reorder dtype"

    def assertOnlyReorderFormat(self, line):
        # BUG FIX: same missing-self defect as assertOnlyReorderDtype.
        assert self.OnlyReorderFormat(line), "the verbose msg shows not only reorder format"

    def assertNotReorder(self, line):
        # BUG FIX: same missing-self defect as assertOnlyReorderDtype.
        assert not self.is_dnnl_reorder(line)
TEST_MKL = torch.backends.mkl.is_available()
def skipCUDANonDefaultStreamIf(condition):
    """Return a decorator that disables non-default-stream CUDA testing on
    `fn` when `condition` is true, unless the flag was already forced off."""

    def dec(fn):
        currently_enabled = getattr(fn, "_do_cuda_non_default_stream", True)
        if currently_enabled:
            fn._do_cuda_non_default_stream = not condition
        return fn

    return dec
def suppress_warnings(fn):
    """Decorator: run `fn` with all Python warnings suppressed.

    Fixes two defects in the original: `warnings` was never imported in this
    file (NameError on first use), and the wrapper discarded `fn`'s return
    value.
    """

    @wraps(fn)
    def wrapper(*args, **kwargs):
        # Local import: this module does not import `warnings` at top level.
        import warnings

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            return fn(*args, **kwargs)

    return wrapper
def skipIfNoLapack(fn):
    """Decorator: skip the test when PyTorch was built without LAPACK.

    Fixes the original wrapper, which discarded `fn`'s return value.
    """

    @wraps(fn)
    def wrapper(*args, **kwargs):
        if not torch._C.has_lapack:
            raise unittest.SkipTest("PyTorch compiled without Lapack")
        return fn(*args, **kwargs)

    return wrapper
def int8_calibration(model, data, dir):
    """Run calibration-mode forward passes over `data` and save the resulting
    int8 mixed-precision configuration to `dir`."""
    conf = ipex.AmpConf(torch.int8)
    with torch.no_grad():
        for sample in data:
            with ipex.AutoMixPrecision(conf, running_mode="calibration"):
                model(sample)
    conf.save(dir)
| 3,062 | 30.255102 | 87 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/data/network2.py | """
From PyTorch:
Copyright (c) 2016- Facebook, Inc (Adam Paszke)
Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
Copyright (c) 2011-2013 NYU (Clement Farabet)
Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)
From Caffe2:
Copyright (c) 2016-present, Facebook Inc. All rights reserved.
All contributions by Facebook:
Copyright (c) 2016 Facebook Inc.
All contributions by Google:
Copyright (c) 2015 Google Inc.
All rights reserved.
All contributions by Yangqing Jia:
Copyright (c) 2015 Yangqing Jia
All rights reserved.
All contributions from Caffe:
Copyright(c) 2013, 2014, 2015, the respective contributors
All rights reserved.
All other contributions:
Copyright(c) 2015, 2016 the respective contributors
All rights reserved.
Caffe2 uses a copyright model similar to Caffe: each contributor holds
copyright over their contributions to Caffe2. The project versioning records
all such contribution and copyright details. If a contributor wants to further
mark their specific copyright on a particular contribution, they should
indicate their copyright solely in the commit message of the change when it is
committed.
All rights reserved.
"""
import torch.nn as nn
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.linear = nn.Linear(10, 20)
self.relu = nn.ReLU()
| 1,807 | 31.872727 | 106 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/data/network1.py | """
From PyTorch:
Copyright (c) 2016- Facebook, Inc (Adam Paszke)
Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
Copyright (c) 2011-2013 NYU (Clement Farabet)
Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)
From Caffe2:
Copyright (c) 2016-present, Facebook Inc. All rights reserved.
All contributions by Facebook:
Copyright (c) 2016 Facebook Inc.
All contributions by Google:
Copyright (c) 2015 Google Inc.
All rights reserved.
All contributions by Yangqing Jia:
Copyright (c) 2015 Yangqing Jia
All rights reserved.
All contributions from Caffe:
Copyright(c) 2013, 2014, 2015, the respective contributors
All rights reserved.
All other contributions:
Copyright(c) 2015, 2016 the respective contributors
All rights reserved.
Caffe2 uses a copyright model similar to Caffe: each contributor holds
copyright over their contributions to Caffe2. The project versioning records
all such contribution and copyright details. If a contributor wants to further
mark their specific copyright on a particular contribution, they should
indicate their copyright solely in the commit message of the change when it is
committed.
All rights reserved.
"""
import torch.nn as nn
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.linear = nn.Linear(10, 20)
| 1,777 | 31.925926 | 106 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import intel_extension_for_pytorch as ipex
sys.path.insert(0, os.path.abspath('.'))

# -- Project information -----------------------------------------------------

project = 'intel_extension_for_pytorch'
copyright = 'Intel(R)'
author = ''

# The full version, including alpha/beta/rc tags.
# Both are read from the installed extension so docs always match the build.
version = ipex.__version__
release = ipex.__version__

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.githubpages',
              'sphinx.ext.autodoc',
              'sphinx.ext.napoleon',
              'recommonmark',
              'sphinx_markdown_tables',
              'sphinx_md',
              'sphinxemoji.sphinxemoji'
              ]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
html_theme_options = {
    'logo_only': False,
    'display_version': True,
    'prev_next_buttons_location': 'bottom',
    'style_external_links': False,
    'vcs_pageview_mode': '',
    #'style_nav_header_background': 'white',
    # Toc options
    'collapse_navigation': True,
    'sticky_navigation': True,
    'navigation_depth': -1,
    'includehidden': True,
    'titles_only': False
}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named 'default.css' will overwrite the builtin 'default.css'.
html_static_path = ['_static']
def setup(app):
app.add_css_file("custom.css")
# Configure sphinx_md so every rendered page links back to its source on
# GitHub (pinned to the CI commit when available).
from os import getenv

sphinx_md_useGitHubURL = True
baseBranch = "main"
commitSHA = getenv('GITHUB_SHA')
githubBaseURL = 'https://github.com/' + (getenv('GITHUB_REPOSITORY') or 'intel/intel-extension-for-pytorch') + '/'
githubFileURL = githubBaseURL + "blob/"
githubDirURL = githubBaseURL + "tree/"
if commitSHA:
    # CI build: pin links to the exact commit being documented.
    githubFileURL = githubFileURL + commitSHA + "/"
    githubDirURL = githubDirURL + commitSHA + "/"
else:
    # Local build: fall back to the main branch.
    githubFileURL = githubFileURL + baseBranch + "/"
    githubDirURL = githubDirURL + baseBranch + "/"
sphinx_md_githubFileURL = githubFileURL
sphinx_md_githubDirURL = githubDirURL
| 3,441 | 32.096154 | 114 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/_meta_registrations.py | import functools
from typing import List, Optional
import torch
import torch.library
from torch._prims_common import IntLike
@functools.lru_cache(None)
def get_meta_lib():
    # Cached so every registration reuses a single Library handle for the
    # "Meta" dispatch key of the torch_ipex namespace.
    return torch.library.Library("torch_ipex", "IMPL", "Meta")
def register_meta(op_name, overload_name="default"):
    """Decorator factory: register the wrapped function as the Meta-dispatch
    implementation of torch.ops.torch_ipex.<op_name>.<overload_name>."""

    def wrapper(fn):
        op = getattr(torch.ops.torch_ipex, op_name)
        overload = getattr(op, overload_name)
        get_meta_lib().impl(overload, fn)
        return fn

    return wrapper
def calc_conv_nd_return_shape(
    input_tensor,
    weight_size,
    stride,
    padding,
    dilation,
    is_transposed,
    groups,
    output_padding,
):
    """Return the output shape [N, C_out, *spatial] of an N-D convolution.

    Follows the shape formulas documented for torch.nn.Conv2d and
    torch.nn.ConvTranspose2d. `groups` and `output_padding` are only
    consulted for transposed convolutions.
    """
    kernel_size = weight_size[2:]
    spatial = input_tensor.shape[2:]
    ndim = len(spatial)

    def _expand(value):
        # Broadcast a scalar or single-element sequence to one entry per
        # spatial dimension; longer sequences are used as-is.
        if isinstance(value, IntLike):
            return [value] * ndim
        if len(value) == 1:
            return [value[0]] * ndim
        return value

    stride = _expand(stride)
    padding = _expand(padding)
    dilation = _expand(dilation)
    # Transposed conv: per-group output channels live in weight dim 1.
    out_channels = groups * weight_size[1] if is_transposed else weight_size[0]
    ret_shape = [input_tensor.shape[0], out_channels]

    out_pad = _expand(output_padding) if output_padding else None

    for i in range(ndim):
        ln, p, d, k, s = spatial[i], padding[i], dilation[i], kernel_size[i], stride[i]
        if out_pad is not None:
            # Transposed convolution length formula (ConvTransposeNd docs).
            ret_shape.append((ln - 1) * s - 2 * p + d * (k - 1) + out_pad[i] + 1)
        else:
            # Forward convolution length formula (ConvNd docs).
            ret_shape.append((ln + 2 * p - d * (k - 1) - 1) // s + 1)
    return ret_shape
def is_channels_last(ten):
    """True when `ten`'s suggested memory format is 4-D channels_last (NHWC)."""
    return torch.channels_last == torch._prims_common.suggest_memory_format(ten)
def is_channels_last_3d(ten):
    """True when `ten`'s suggested memory format is 5-D channels_last_3d (NDHWC)."""
    return torch.channels_last_3d == torch._prims_common.suggest_memory_format(ten)
@register_meta("convolution_forward")
def meta_convolution_forward(
    input,
    weight,
    bias,
    W_prepack,
    kernel_size,
    padding,
    stride,
    dilation,
    weight_channels_last,
):
    """Meta kernel for torch_ipex::convolution_forward: allocate an output of
    the convolution's shape in the layout implied by input/weight."""
    shape_out = calc_conv_nd_return_shape(
        input, kernel_size, stride, padding, dilation, False, None, None
    )
    # Channels-last propagates when either input or weight is channels-last.
    memory_format = torch.contiguous_format
    if is_channels_last(input) or is_channels_last_3d(input) or weight_channels_last:
        if input.dim() == 4:
            memory_format = torch.channels_last
        elif input.dim() == 5:
            memory_format = torch.channels_last_3d
    return input.new_empty(shape_out).to(memory_format=memory_format)
@register_meta("convolution_backward")
def meta_convolution_backward(
    input,
    weight,
    bias,
    grad_output,
    out_mask,
    W_prepack,
    weight_channels_last,
):
    """Meta kernel for torch_ipex::convolution_backward: grads mirror the
    shapes of input/weight/bias, each gated by the corresponding out_mask flag."""
    memory_format = torch.contiguous_format
    if is_channels_last(input) or is_channels_last_3d(input) or weight_channels_last:
        if input.dim() == 4:
            memory_format = torch.channels_last
        elif input.dim() == 5:
            memory_format = torch.channels_last_3d
    grad_input = grad_weight = grad_bias = None
    if out_mask[0]:
        grad_input = grad_output.new_empty(input.size()).to(
            memory_format=memory_format
        )
    if out_mask[1]:
        grad_weight = grad_output.new_empty(weight.size())
    if out_mask[2]:
        grad_bias = grad_output.new_empty(bias.size())
    return (grad_input, grad_weight, grad_bias)
@register_meta("conv_transpose")
def meta_conv_transpose(
    input,
    weight,
    bias_opt,
    W_prepack,
    weight_size,
    padding,
    output_padding,
    stride,
    dilation,
    groups,
    weight_channels_last,
):
    """Meta (shape-only) kernel for IPEX conv_transpose.

    Same layout policy as meta_convolution_forward, but the output shape is
    computed with the transposed-convolution formula (transposed=True).
    """
    output_shape = calc_conv_nd_return_shape(
        input,
        weight_size,
        stride,
        padding,
        dilation,
        True,
        groups,
        output_padding,
    )
    channels_last = (
        is_channels_last(input) or is_channels_last_3d(input) or weight_channels_last
    )
    if not channels_last:
        memory_format = torch.contiguous_format
    elif input.dim() == 4:
        memory_format = torch.channels_last
    elif input.dim() == 5:
        memory_format = torch.channels_last_3d
    else:
        memory_format = torch.contiguous_format
    return input.new_empty(output_shape).to(memory_format=memory_format)
@register_meta("conv_transpose_backward")
def meta_conv_transpose_backward(
    input,
    weight,
    bias,
    grad_output,
    out_mask,
    W_prepack,
    weight_channels_last,
):
    """Meta kernel for IPEX conv_transpose_backward.

    Mirrors meta_convolution_backward: allocates empty gradient buffers per
    `out_mask`, with grad_input following the input's channels-last layout.
    """
    channels_last = (
        is_channels_last(input) or is_channels_last_3d(input) or weight_channels_last
    )
    if channels_last and input.dim() == 4:
        memory_format = torch.channels_last
    elif channels_last and input.dim() == 5:
        memory_format = torch.channels_last_3d
    else:
        memory_format = torch.contiguous_format
    grad_input = (
        grad_output.new_empty(input.size()).to(memory_format=memory_format)
        if out_mask[0]
        else None
    )
    grad_weight = grad_output.new_empty(weight.size()) if out_mask[1] else None
    grad_bias = grad_output.new_empty(bias.size()) if out_mask[2] else None
    return (grad_input, grad_weight, grad_bias)
@register_meta("ipex_linear")
def meta_ipex_linear(
    input,
    weight,
    bias,
    W_prepack,
    out_features,
):
    """Meta kernel for ipex_linear: output keeps all leading dims of `input`
    and replaces the last dimension with `out_features`."""
    output_shape = list(input.shape[:-1]) + [out_features]
    return input.new_empty(output_shape)
@register_meta("linear_backward")
def meta_linear_backward(
    input,
    weight,
    bias,
    grad_output,
    out_mask,
    W_prepack,
):
    """Meta kernel for IPEX linear_backward: empty gradient buffers gated by
    the three `out_mask` flags (input, weight, bias)."""
    grad_input = grad_output.new_empty(input.size()) if out_mask[0] else None
    grad_weight = grad_output.new_empty(weight.size()) if out_mask[1] else None
    grad_bias = grad_output.new_empty(bias.size()) if out_mask[2] else None
    return (grad_input, grad_weight, grad_bias)
@register_meta("ipex_MKLSGEMM")
def meta_ipex_MKLSGEMM(
    input,
    weight,
    bias,
    W_prepack,
    out_features,
):
    """Meta kernel for ipex_MKLSGEMM; same shape rule as ipex_linear."""
    output_shape = list(input.shape[:-1]) + [out_features]
    return input.new_empty(output_shape)
@register_meta("embedding_bag")
def meta_embedding_bag(
    weight,
    indices,
    offsets,
    sparse,
    include_last_offset,
):
    """Meta kernel for IPEX embedding_bag.

    Bag count comes from `indices.shape[0]` for 2-d indices, otherwise from
    `offsets.shape[0]`; the output is (num_bags, embedding_dim).
    """
    num_bags = indices.shape[0] if indices.dim() == 2 else offsets.shape[0]
    return weight.new_empty([num_bags, weight.shape[1]])
@register_meta("ipex_lstm")
def meta_ipex_lstm(
    input,
    hx,
    params,
    has_biases,
    num_layers,
    dropout_p,
    train,
    bidirectional,
    batch_first,
):
    """Meta kernel for ipex_lstm.

    Output keeps the input's leading dims with the feature dim set to the
    hidden size (doubled when bidirectional); hy/cy mirror hx[0]/hx[1].
    """
    num_directions = 2 if bidirectional else 1
    output_shape = [*input.shape[:-1], hx[0].shape[2] * num_directions]
    out = input.new_empty(output_shape)
    hy = hx[0].new_empty(hx[0].size())
    cy = hx[1].new_empty(hx[1].size())
    return (out, hy, cy)
@register_meta("ROIAlign_forward")
def meta_ROIAlign_forward(
    input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio, aligned
):
    """Meta kernel for ROIAlign_forward: (num_rois, C, pooled_h, pooled_w)
    output in the memory format suggested for `input`."""
    output_shape = (rois.shape[0], input.shape[1], pooled_height, pooled_width)
    suggested = torch._prims_common.suggest_memory_format(input)
    return input.new_empty(output_shape).to(memory_format=suggested)
@register_meta("ROIAlign_backward")
def meta_ROIAlign_backward(
    grad,
    rois,
    spatial_scale,
    pooled_height,
    pooled_width,
    batch_size,
    channels,
    height,
    width,
    sampling_ratio,
    aligned,
    is_channels_last,
):
    """Meta kernel for ROIAlign_backward: grad-input buffer of the original
    input shape, channels-last when the `is_channels_last` flag is set.

    NOTE: the `is_channels_last` parameter intentionally shadows the
    module-level helper of the same name; here it is a plain bool flag.
    """
    fmt = torch.channels_last if is_channels_last else torch.contiguous_format
    return grad.new_empty((batch_size, channels, height, width)).to(memory_format=fmt)
| 9,941 | 24.492308 | 85 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/launcher.py | import sys
import argparse
import warnings
from functools import partial
from .cpu.launch import (
init_parser as cpu_init_parser,
run_main_with_args as cpu_run_main_with_args,
)
from .xpu.launch import (
init_parser as xpu_init_parser,
run_main_with_args as xpu_run_main_with_args,
)
def init_parser():
    """Build the top-level ``ipexrun`` argument parser.

    Creates one root parser plus two sub-parsers registered under the
    ``backend`` destination ("cpu" and "xpu"), then delegates to the
    backend-specific ``init_parser`` helpers to populate their options.

    Returns:
        tuple: (root_parser, cpu_subparser, xpu_subparser)
    """
    # RawTextHelpFormatter keeps the hand-formatted banner/usage text intact.
    parser = argparse.ArgumentParser(
        description="\n=================================== LAUNCHER ============================== \n"
        "\nThis is a script for launching PyTorch training and inference on *Intel Xeon CPU* "
        "or *Intel GPU* with optimal configurations. \n"
        "\n################################# Basic usage ############################# \n"
        "\n1. Run with CPU backend \n"
        "\n >>> ipexrun cpu python_script args\n"
        "\n2. Run with XPU backend\n"
        "\n >>> ipexrun xpu python_script args\n"
        "\n############################################################################# \n",
        formatter_class=argparse.RawTextHelpFormatter,
    )
    # The chosen sub-command name lands in args.backend ("cpu" or "xpu").
    subparsers = parser.add_subparsers(dest="backend", help="Run with specific Backend")
    cpu_parser = subparsers.add_parser(
        "cpu",
        help="Run with CPU Backend",
        description="This is a script for launching PyTorch training and inference on Intel Xeon CPU "
        "with optimal configurations. Now, single instance inference/training, multi-instance "
        "inference/training and distributed training with oneCCL backend is enabled. "
        "To get the peak performance on Intel Xeon CPU, the script optimizes the configuration "
        "of thread and memory management. For thread management, the script configures thread "
        "affinity and the preload of Intel OMP library. For memory management, it configures "
        "NUMA binding and preload optimized memory allocation library (e.g. tcmalloc, jemalloc) "
        "\n################################# Basic usage ############################# \n"
        "\n 1. single instance\n"
        "\n >>> ipexrun cpu python_script args \n"
        "\n2. multi-instance \n"
        "\n >>> ipexrun cpu --ninstances xxx --ncore_per_instance xx python_script args\n"
        "\n3. Single-Node multi-process distributed training\n"
        "\n >>> python -m intel_extension_for_pytorch.launch cpu --distributed python_script args\n"
        "\n4. Multi-Node multi-process distributed training: (e.g. two nodes)\n"
        "\n rank 0: *(IP: 192.168.10.10, and has a free port: 295000)*\n"
        "\n >>> ipexrun cpu --distributed --nproc_per_node=2\n"
        "\n --nnodes=2 --hostfile hostfile python_script args\n"
        "\n############################################################################# \n",
        formatter_class=argparse.RawTextHelpFormatter,
    )
    xpu_parser = subparsers.add_parser(
        "xpu",
        help="Run with XPU Backend",
        description="This is a script for launching PyTorch training and inference on Intel GPU Series "
        "with optimal configurations."
        "\n################################# Basic usage ############################# \n"
        "\n single instance\n"
        "\n >>> ipexrun xpu python_script args \n"
        "\n############################################################################# \n",
        formatter_class=argparse.RawTextHelpFormatter,
    )
    # Backend modules attach their own flags onto the sub-parsers in place.
    cpu_init_parser(cpu_parser)
    xpu_init_parser(xpu_parser)
    return parser, cpu_parser, xpu_parser
def mixed_print_help(f1, f2, f3):
    """Print the combined help text: the top-level parser help (f1),
    then the CPU launcher help (f2) and the XPU launcher help (f3),
    each preceded by a section banner."""
    banners = (
        "\n================================ CPU LAUNCHER ============================= \n",
        "\n================================ XPU LAUNCHER ============================= \n",
    )
    f1()
    for banner, print_section in zip(banners, (f2, f3)):
        print(banner)
        print_section()
def main():
    """Entry point for ``ipexrun``.

    Builds the parser, wires the combined help printer, defaults the backend
    sub-command to "cpu" when none was given, and dispatches to the CPU or
    XPU launcher.
    """
    parser, cpu_parser, xpu_parser = init_parser()
    # The partial's arguments are evaluated before the assignment, so
    # parser.print_help here still refers to the original bound method.
    parser.print_help = partial(
        mixed_print_help,
        f1=parser.print_help,
        f2=cpu_parser.print_help,
        f3=xpu_parser.print_help,
    )
    missing_backend = (
        len(sys.argv) > 1
        and "-h" not in sys.argv
        and "--help" not in sys.argv
        and sys.argv[1] not in ("cpu", "xpu")
    )
    if missing_backend:
        warnings.warn(
            "Backend is not specified, it will automatically default to cpu.",
            UserWarning,
        )
        sys.argv.insert(1, "cpu")
    args = parser.parse_args()
    if args.backend == "cpu":
        cpu_run_main_with_args(args)
    elif args.backend == "xpu":
        xpu_run_main_with_args(args)
    else:
        parser.print_help()
if __name__ == "__main__":
    main()
| 4,753 | 39.288136 | 105 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/__init__.py | # This Python file uses the following encoding: utf-8
import re
import torch
import warnings
try:
import torchvision
except ImportError:
pass # skip if torchvision is not available
import os
import sys
import glob
import ctypes
import platform
from . import cpu
from . import xpu
from . import quantization
from . import nn
from . import jit
from . import optim
from . import fx
from . import _meta_registrations
try:
from .cpu import tpp
except BaseException:
warnings.warn(
"Please install transformers repo when you want to use fast_bert API."
)
from .frontend import optimize
from .frontend import enable_auto_channels_last, disable_auto_channels_last
from .frontend import set_fp32_math_mode, get_fp32_math_mode, FP32MathMode
from .cpu._auto_kernel_selection import _enable_dnnl, _disable_dnnl, _using_dnnl
from .cpu.utils.verbose import verbose
from .cpu.tpp.fused_bert import fast_bert
from ._inductor.compiler import _set_compiler_backend, _get_compiler_backend, compile
from ._inductor.dynamo_backends import *
from .cpu.onednn_fusion import enable_onednn_fusion
from . import _C
from ._version import (
__version__,
__ipex_gitrev__,
__torch_gitrev__,
__gpu_onednn_gitrev__,
__cpu_ideep_gitrev__,
__build_type__,
)
# Path to folder containing CMake definitions for torch ipex package
cmake_prefix_path = os.path.join(os.path.dirname(__file__), "share", "cmake")
# Guard against a PyTorch/IPEX major.minor mismatch: both versions are reduced
# to "X.Y" and must agree, otherwise the import aborts with exit code 127.
torch_version = ""
ipex_version = ""
matches = re.match(r"(\d+\.\d+).*", torch.__version__)
if matches and len(matches.groups()) == 1:
    torch_version = matches.group(1)
matches = re.match(r"(\d+\.\d+).*", __version__)
if matches and len(matches.groups()) == 1:
    ipex_version = matches.group(1)
if torch_version == "" or ipex_version == "" or torch_version != ipex_version:
    print(
        "ERROR! Intel® Extension for PyTorch* needs to work with PyTorch \
{0}.*, but PyTorch {1} is found. Please switch to the matching version \
and run again.".format(
            ipex_version, torch.__version__
        )
    )
    # NOTE(review): builtin exit() depends on the site module; sys.exit(127)
    # would be more robust — confirm before changing.
    exit(127)
from .cpu.utils import _cpu_isa, _custom_fx_tracer
# Abort early on CPUs below the minimum required ISA level.
_cpu_isa.check_minimal_isa_support()
def version():
    """Print the intel_extension_for_pytorch version plus the git revisions
    of the extension itself and its bundled submodules (oneDNN, ideep).
    The torch line is only printed when a torch sha was recorded at build."""
    report = [
        ("intel_extension_for_pytorch version: {}", __version__),
        ("intel_extension_for_pytorch git sha: {}", __ipex_gitrev__),
    ]
    if len(__torch_gitrev__) != 0:
        report.append(("torch version and sha: {}", __torch_gitrev__))
    report.append(("submodule oneDNN sha: {}", __gpu_onednn_gitrev__))
    report.append(("submodule ideep sha: {}", __cpu_ideep_gitrev__))
    for template, value in report:
        print(template.format(value))
| 2,613 | 28.370787 | 85 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/frontend.py | # This Python file uses the following encoding: utf-8
import copy
import warnings
import torch
import torch._dynamo
import torch.fx.experimental.optimization as optimization
from enum import IntFlag, IntEnum
from .nn import utils
from .optim._optimizer_utils import (
optimizer_fusion,
IPEX_FUSED_OPTIMIZER_LIST_CPU,
IPEX_FUSED_OPTIMIZER_LIST_XPU,
)
from .utils.channels_last_1d import to_channels_last_1d
from .cpu.utils.linear_bn_folding import linear_bn_fuse
from .cpu.graph_capture import GraphCapture
from .nn.utils._lstm_convert import _LSTM, replace_lstm_with_ipex_lstm
from .nn.utils._weight_prepack import _IPEXConv2d, _IPEXConvTranspose2d, _IPEXLinear
from .nn.utils._weight_prepack import weight_prepack_with_ipex, record_input_shape_for_prepack
from .cpu._auto_kernel_selection import (
_enable_dnnl,
_disable_dnnl,
)
from .fx.concat_linear import _concat_linear
import intel_extension_for_pytorch._C as core
def _copy_model_and_optimizer(model, optimizer):
    """Deep-copy ``model`` (and ``optimizer`` when given) for non-inplace optimize().

    The tricky part is that the copied optimizer must reference the copied
    model's parameter objects, not the originals: param_groups, state and the
    IPEX-specific ``params_attr`` map are all re-keyed through an
    original-parameter -> copied-parameter mapping built below.
    Returns ``(new_model, optimizer)`` when optimizer is None, else
    ``(new_model, new_optimizer)``.
    """
    new_model = copy.deepcopy(model)
    if optimizer is None:
        return new_model, optimizer
    else:
        new_optimizer = copy.deepcopy(optimizer)
        dic_param = {}
        dic_param_for_master_case = {}
        # Map each original parameter object to its deep-copied counterpart;
        # relies on parameters() yielding in the same order for both models.
        for k, value in zip(model.parameters(), new_model.parameters()):
            dic_param[k] = value
        if hasattr(optimizer, "params_attr"):
            params_attr = optimizer.params_attr
            param_key_pair = {}
            if len(params_attr) != 0:
                new_params_attr = copy.deepcopy(params_attr)
                # Re-point copied attr entries at the copied parameters.
                # When a master parameter exists, the attr keys themselves
                # must be swapped (handled via param_key_pair below).
                for (k1, v1), (k2, v2) in zip(
                    params_attr.items(), new_params_attr.items()
                ):
                    if v1.master_parameter is None:
                        v2.parameter = dic_param[v1.parameter]
                    else:
                        dic_param_for_master_case[k1] = k2
                        param_key_pair[k1] = k2
                # In the master-weight case the attr keys (not the plain
                # parameters) are what param_groups reference, so the key
                # mapping replaces the parameter mapping from here on.
                if len(dic_param_for_master_case) != 0:
                    dic_param = dic_param_for_master_case
                for k, v in param_key_pair.items():
                    new_params_attr[dic_param[k]] = new_params_attr.pop(v)
                setattr(new_optimizer, "params_attr", new_params_attr) # noqa: B010
        # Rebuild optimizer.state keyed by the copied parameters; entries for
        # parameters not found in dic_param are intentionally dropped.
        new_optimizer.state.clear()
        # deep copy param_groups
        for group1, group2 in zip(optimizer.param_groups, new_optimizer.param_groups):
            for i, p in enumerate(group1["params"]):
                if p in dic_param:
                    new_model_param = dic_param[p]
                    group2["params"][i] = new_model_param
                    new_optimizer.state[new_model_param] = copy.deepcopy(
                        optimizer.state[p]
                    )
        # Propagate the split-master-weight marker attribute down the module
        # tree, since deepcopy alone does not guarantee custom attrs on
        # replaced submodules stay aligned.
        def _attach_master_weight_split_attr(old_module, new_module):
            if hasattr(old_module, "master_weight_split"):
                setattr( # noqa: B010
                    new_module, "master_weight_split", old_module.master_weight_split
                )
            for (_, old_child), (_, new_child) in zip(
                old_module.named_children(), new_module.named_children()
            ):
                _attach_master_weight_split_attr(old_child, new_child)
        _attach_master_weight_split_attr(model, new_model)
        return new_model, new_optimizer
class auto_channels_last_flag(IntFlag):
    """Tri-state switch controlling automatic channels-last weight conversion
    performed by :func:`optimize`."""

    AUTO = -1  # decide per device (e.g. XPU additionally checks has_2d_block_array)
    DISABLE = 0
    ENABLE = 1
# Module-level current setting; mutated by enable/disable_auto_channels_last().
auto_channels_last = auto_channels_last_flag.AUTO
def enable_auto_channels_last():
    """Force automatic channels-last weight conversion in subsequent
    :func:`optimize` calls (overrides the AUTO heuristic)."""
    global auto_channels_last
    auto_channels_last = auto_channels_last_flag.ENABLE
def disable_auto_channels_last():
    """Disable automatic channels-last weight conversion in subsequent
    :func:`optimize` calls."""
    global auto_channels_last
    auto_channels_last = auto_channels_last_flag.DISABLE
class _Properties(object):
r"""
This class is to establish a set of default properties.
"""
def __init__(self):
self.opt_level = None
self.conv_bn_folding = None
self.weights_prepack = None
self.remove_dropout = None
# optimizer opt conig
self.split_master_weight_for_bf16 = None
self.fuse_update_step = None
self.auto_kernel_selection = None
self.graph_mode = None
# O0 properties
class _O0:
def __call__(self, properties):
properties.opt_level = "O0"
properties.conv_bn_folding = False
properties.linear_bn_folding = False
properties.weights_prepack = False
properties.replace_dropout_with_identity = False
properties.optimize_lstm = False
properties.split_master_weight_for_bf16 = False
properties.fuse_update_step = False
properties.auto_kernel_selection = False
properties.graph_mode = False
properties.concat_linear = False
return properties
# O1 properties
class _O1:
def __call__(self, properties):
properties.opt_level = "O1"
properties.conv_bn_folding = True
properties.linear_bn_folding = True
properties.weights_prepack = True
properties.replace_dropout_with_identity = True
properties.optimize_lstm = True
properties.split_master_weight_for_bf16 = True
properties.fuse_update_step = True
properties.auto_kernel_selection = False
properties.graph_mode = False
properties.concat_linear = False
return properties
opt_levels = {"O0": _O0(), "O1": _O1()}
def optimize(
    model,
    dtype=None,
    optimizer=None,
    level="O1",
    inplace=False,
    conv_bn_folding=None,
    linear_bn_folding=None,
    weights_prepack=None,
    replace_dropout_with_identity=None,
    optimize_lstm=None,
    split_master_weight_for_bf16=None,
    fuse_update_step=None,
    auto_kernel_selection=None,
    sample_input=None,
    graph_mode=None,
    concat_linear=None,
):
    r"""
    Apply optimizations at Python frontend to the given model (nn.Module), as
    well as the given optimizer (optional). If the optimizer is given,
    optimizations will be applied for training. Otherwise, optimization will be
    applied for inference. Optimizations include ``conv+bn`` folding (for
    inference only), weight prepacking and so on.
    Weight prepacking is a technique to accelerate performance of oneDNN
    operators. In order to achieve better vectorization and cache reuse, onednn
    uses a specific memory layout called ``blocked layout``. Although the
    calculation itself with ``blocked layout`` is fast enough, from memory usage
    perspective it has drawbacks. Running with the ``blocked layout``, oneDNN
    splits one or several dimensions of data into blocks with fixed size each
    time the operator is executed. More details information about oneDNN data
    mermory format is available at `oneDNN manual
    <https://oneapi-src.github.io/oneDNN/dev_guide_understanding_memory_formats.html>`_.
    To reduce this overhead, data will be converted to predefined block shapes
    prior to the execution of oneDNN operator execution. In runtime, if the data
    shape matches oneDNN operator execution requirements, oneDNN won't perform
    memory layout conversion but directly go to calculation. Through this
    methodology, called ``weight prepacking``, it is possible to avoid runtime
    weight data format convertion and thus increase performance.
    Args:
        model (torch.nn.Module): User model to apply optimizations on.
        dtype (torch.dtype): Only works for ``torch.bfloat16`` and ``torch.half`` a.k.a ``torch.float16``.
            Model parameters will be casted to ``torch.bfloat16`` or ``torch.half``
            according to dtype of settings. The default value is None, meaning do nothing.
            Note: Data type conversion is only applied to ``nn.Conv2d``, ``nn.Linear``
            and ``nn.ConvTranspose2d`` for both training and inference cases. For
            inference mode, additional data type conversion is applied to the weights
            of ``nn.Embedding`` and ``nn.LSTM``.
        optimizer (torch.optim.Optimizer): User optimizer to apply optimizations
            on, such as SGD. The default value is ``None``, meaning inference case.
        level (string): ``"O0"`` or ``"O1"``. No optimizations are applied with
            ``"O0"``. The optimizer function just returns the original model and
            optimizer. With ``"O1"``, the following optimizations are applied:
            conv+bn folding, weights prepack, dropout removal (inferenc model),
            master weight split and fused optimizer update step (training model).
            The optimization options can be further overridden by setting the
            following options explicitly. The default value is ``"O1"``.
        inplace (bool): Whether to perform inplace optimization. Default value is
            ``False``.
        conv_bn_folding (bool): Whether to perform ``conv_bn`` folding. It only
            works for inference model. The default value is ``None``. Explicitly
            setting this knob overwrites the configuration set by ``level`` knob.
        linear_bn_folding (bool): Whether to perform ``linear_bn`` folding. It only
            works for inference model. The default value is ``None``. Explicitly
            setting this knob overwrites the configuration set by ``level`` knob.
        weights_prepack (bool): Whether to perform weight prepack for convolution
            and linear to avoid oneDNN weights reorder. The default value is
            ``None``. Explicitly setting this knob overwrites the configuration
            set by ``level`` knob. For now, XPU doesn't support weights prepack.
        replace_dropout_with_identity (bool): Whether to replace ``nn.Dropout``
            with ``nn.Identity``. If replaced, the ``aten::dropout`` won't be
            included in the JIT graph. This may provide more fusion opportunites
            on the graph. This only works for inference model. The default value
            is ``None``. Explicitly setting this knob overwrites the configuration
            set by ``level`` knob.
        optimize_lstm (bool): Whether to replace ``nn.LSTM`` with ``IPEX LSTM``
            which takes advantage of oneDNN kernels to get better performance.
            The default value is ``None``. Explicitly setting this knob
            overwrites the configuration set by ``level`` knob.
        split_master_weight_for_bf16 (bool): Whether to split master weights
            update for BF16 training. This saves memory comparing to master
            weight update solution. Split master weights update methodology
            doesn't support all optimizers. The default value is None. The
            default value is ``None``. Explicitly setting this knob overwrites
            the configuration set by ``level`` knob.
        fuse_update_step (bool): Whether to use fused params update for training
            which have better performance. It doesn't support all optimizers.
            The default value is ``None``. Explicitly setting this knob
            overwrites the configuration set by ``level`` knob.
        sample_input (tuple or torch.Tensor): Whether to feed sample input data to ipex.optimize. The shape of
            input data will impact the block format of packed weight. If not feed a sample
            input, Intel® Extension for PyTorch* will pack the weight per some predefined heuristics.
            If feed a sample input with real input shape, Intel® Extension for PyTorch* can get
            best block format.
        auto_kernel_selection (bool) [experimental]: Different backends may have
            different performances with different dtypes/shapes. Default value
            is False. Intel® Extension for PyTorch* will try to optimize the
            kernel selection for better performance if this knob is set to
            ``True``. You might get better performance at the cost of extra memory usage.
            The default value is ``None``. Explicitly setting this knob overwrites the
            configuration set by ``level`` knob.
        graph_mode: (bool) [experimental]: It will automatically apply a combination of methods
            to generate graph or multiple subgraphs if True. The default value is ``False``.
        concat_linear (bool): Whether to perform ``concat_linear``. It only
            works for inference model. The default value is ``None``. Explicitly
            setting this knob overwrites the configuration set by ``level`` knob.
    Returns:
        Model and optimizer (if given) modified according to the ``level`` knob
        or other user settings. ``conv+bn`` folding may take place and
        ``dropout`` may be replaced by ``identity``. In inference scenarios,
        convolutuon, linear and lstm will be replaced with the optimized
        counterparts in Intel® Extension for PyTorch* (weight prepack for
        convolution and linear) for good performance. In bfloat16 or float16 scenarios,
        parameters of convolution and linear will be casted to bfloat16 or float16 dtype.
    .. warning::
        Please invoke ``optimize`` function BEFORE invoking DDP in distributed
        training scenario.
        The ``optimize`` function deepcopys the original model. If DDP is invoked
        before ``optimize`` function, DDP is applied on the origin model, rather
        than the one returned from ``optimize`` function. In this case, some
        operators in DDP, like allreduce, will not be invoked and thus may cause
        unpredictable accuracy loss.
    Examples:
        >>> # bfloat16 inference case.
        >>> model = ...
        >>> model.load_state_dict(torch.load(PATH))
        >>> model.eval()
        >>> optimized_model = ipex.optimize(model, dtype=torch.bfloat16)
        >>> # running evaluation step.
        >>> # bfloat16 training case.
        >>> optimizer = ...
        >>> model.train()
        >>> optimized_model, optimized_optimizer = ipex.optimize(model, dtype=torch.bfloat16, optimizer=optimizer)
        >>> # running training step.
    `torch.xpu.optimize()` is an alternative of optimize API in Intel® Extension for PyTorch*,
    to provide identical usage for XPU device only. The motivation of adding this alias is
    to unify the coding style in user scripts base on torch.xpu modular.
    Examples:
        >>> # bfloat16 inference case.
        >>> model = ...
        >>> model.load_state_dict(torch.load(PATH))
        >>> model.eval()
        >>> optimized_model = torch.xpu.optimize(model, dtype=torch.bfloat16)
        >>> # running evaluation step.
        >>> # bfloat16 training case.
        >>> optimizer = ...
        >>> model.train()
        >>> optimized_model, optimized_optimizer = torch.xpu.optimize(model, dtype=torch.bfloat16, optimizer=optimizer)
        >>> # running training step.
    """
    # TorchScript modules are already frozen graphs; return them untouched.
    if isinstance(model, torch.jit.ScriptModule):
        if optimizer is None:
            return model
        return model, optimizer
    # Training vs. inference mode is inferred from model.training and must be
    # consistent with whether an optimizer was supplied.
    if model.training:
        assert optimizer is not None, "The optimizer should be given for training mode"
    else:
        assert optimizer is None, "The optimizer should not be given for inference mode"
    # Phase 1: resolve the optimization properties from the requested level.
    opt_properties = _Properties()
    if level not in opt_levels:
        raise RuntimeError(
            f"Unexpected optimization level {level}. Options are 'O0', 'O1'."
        )
    else:
        opt_properties = opt_levels[level](opt_properties)
    # Phase 2: detect the device type from the model parameters; a model with
    # parameters on mixed device types is rejected.
    device_type = "cpu"
    model_parameters_list = list(model.parameters())
    if len(model_parameters_list) and model_parameters_list[0].device.type == "xpu":
        if not all([param.device.type == "xpu" for param in model_parameters_list]):
            raise RuntimeError("The model is mixed with different device type")
        else:
            device_type = "xpu"
    global auto_channels_last
    def xpu_check_channel_last():
        global auto_channels_last
        # On XPU, AUTO defers to the hardware capability check.
        if auto_channels_last.value == auto_channels_last_flag.ENABLE:
            return True
        elif (
            auto_channels_last.value == auto_channels_last_flag.AUTO
            and torch.xpu.has_2d_block_array()
        ):
            return True
        else:
            return False
    # Phase 3: optionally convert conv/deconv weights to channels-last layout.
    if device_type == "cpu" and (
        auto_channels_last.value != auto_channels_last_flag.DISABLE
    ):
        _convert_convNd_deconvNd_weight_memory_format(model)
    elif device_type == "xpu" and xpu_check_channel_last():
        _convert_convNd_deconvNd_weight_memory_format(model)
    # Phase 4: explicit keyword arguments override the level preset.
    if level is not None:
        opt_properties.opt_level = level
    if conv_bn_folding is not None:
        opt_properties.conv_bn_folding = conv_bn_folding
    if linear_bn_folding is not None:
        opt_properties.linear_bn_folding = linear_bn_folding
    if weights_prepack is not None:
        opt_properties.weights_prepack = weights_prepack
    if replace_dropout_with_identity is not None:
        opt_properties.replace_dropout_with_identity = replace_dropout_with_identity
    if optimize_lstm is not None:
        opt_properties.optimize_lstm = optimize_lstm
    if split_master_weight_for_bf16 is not None:
        opt_properties.split_master_weight_for_bf16 = split_master_weight_for_bf16
    if fuse_update_step is not None:
        opt_properties.fuse_update_step = fuse_update_step
    if auto_kernel_selection is not None:
        opt_properties.auto_kernel_selection = auto_kernel_selection
    if graph_mode is not None:
        opt_properties.graph_mode = graph_mode
    if concat_linear is not None:
        opt_properties.concat_linear = concat_linear
    _disable_dnnl()
    if opt_properties.auto_kernel_selection:
        _enable_dnnl()
    # when on xpu, some features are not supported
    if device_type == "xpu":
        if opt_properties.auto_kernel_selection:
            warnings.warn(
                "For XPU device, the auto kernel selection is unsupported, so disable it."
            )
            opt_properties.auto_kernel_selection = False
        if opt_properties.split_master_weight_for_bf16:
            # currently split master weight for xpu only support sgd
            if type(optimizer) is torch.optim.SGD:
                opt_properties.split_master_weight_for_bf16 = True
            else:
                opt_properties.split_master_weight_for_bf16 = False
        if opt_properties.graph_mode:
            warnings.warn(
                "For XPU, the oob solution for inference is to trace model outside of the torch.xpu.optimize,"
                + " so temp to disable the graph mode"
            )
            opt_properties.graph_mode = False
        if not inplace:
            warnings.warn(
                "For XPU device to save valuable device memory, temp to do optimization on inplaced model,"
                + " so make inplace to be true"
            )
            inplace = True
        # for XPU, weight prepack is unsupported, so sample input is useless
        if opt_properties.weights_prepack:
            warnings.warn(
                "For XPU, the weight prepack and sample input are disabled. The onednn layout"
                + " is automatically chosen to use"
            )
            opt_properties.weights_prepack = False
            sample_input = None
        # NOTE(review): `is not None` is always True here since optimize_lstm
        # is a bool after the level preset — likely intended to be a truthiness
        # check; kept as-is to preserve behavior.
        if opt_properties.optimize_lstm is not None:
            warnings.warn(
                "For XPU, the optimize_lstm(replace lstm with ipex_lstm) is unsupported, so disable it"
            )
            opt_properties.optimize_lstm = False
    # Phase 5: clone model/optimizer unless optimizing in place.
    if inplace:
        optimized_model = model
        optimized_optimizer = optimizer
    else:
        optimized_model, optimized_optimizer = _copy_model_and_optimizer(
            model, optimizer
        )
    if sample_input is not None:
        if isinstance(sample_input, torch.Tensor):
            sample_input = (sample_input,)
        record_input_shape_for_prepack(optimized_model, sample_input)
    params_attr = {}
    # Phase 6 (inference only): graph-level folding/replacement passes.
    # Folding failures are deliberately non-fatal best-effort.
    if not model.training:
        if opt_properties.conv_bn_folding:
            try:
                optimized_model = optimization.fuse(optimized_model, inplace=True)
            except: # noqa E722
                warnings.warn(
                    "Conv BatchNorm folding failed during the optimize process."
                )
        if opt_properties.linear_bn_folding:
            try:
                optimized_model = linear_bn_fuse(optimized_model, inplace=True)
            except BaseException:
                warnings.warn(
                    "Linear BatchNorm folding failed during the optimize process."
                )
        if opt_properties.replace_dropout_with_identity:
            utils._model_convert.replace_dropout_with_identity(optimized_model)
        if opt_properties.concat_linear:
            optimized_model = _concat_linear(optimized_model, inplace=True)
        if dtype in (
            torch.bfloat16,
            torch.float16,
        ):
            params_attr, optimized_model = utils._model_convert.convert_model_data_type(
                optimized_model, dtype
            )
        if opt_properties.optimize_lstm:
            replace_lstm_with_ipex_lstm(optimized_model, optimized_optimizer)
    # Phase 7 (training only): validate split-master-weight support for the
    # optimizer type; unsupported combinations fall back with a warning.
    if (
        model.training
        and opt_properties.split_master_weight_for_bf16
        and dtype is torch.bfloat16
    ):
        if not opt_properties.fuse_update_step:
            opt_properties.split_master_weight_for_bf16 = False
            warnings.warn(
                "IPEX does not non-fused split master weight for bf16 training, "
                + "have reset split_master_weight_for_bf16 flag to False. "
                + "If you want to use split_master_weight_for_bf16. "
                + "Please set both split_master_weight_for_bf16 and fuse_update_step to True."
            )
        elif (
            type(optimizer) not in IPEX_FUSED_OPTIMIZER_LIST_CPU
            and device_type == "cpu"
        ):
            opt_properties.split_master_weight_for_bf16 = False
            opt_properties.fuse_update_step = False
            warnings.warn(
                "IPEX CPU does not support fused/fused split update for "
                + str(type(optimizer))
                + " will use non-fused master weight update for bf16 training on CPU."
            )
        elif (
            type(optimizer) not in IPEX_FUSED_OPTIMIZER_LIST_XPU
            and device_type == "xpu"
        ):
            opt_properties.split_master_weight_for_bf16 = False
            opt_properties.fuse_update_step = False
            warnings.warn(
                "IPEX XPU does not support fused/fused split update for "
                + str(type(optimizer))
                + " will use non-fused master weight update for bf16 training on XPU."
            )
    # Phase 8 (training only): low-precision weight casting with master weights.
    if model.training:
        if hasattr(optimized_optimizer, "params_attr"):
            params_attr = optimized_optimizer.params_attr
        if dtype == torch.float16:
            assert (
                device_type != "xpu"
            ), "For now, XPU device does not support model training with half precision."
            opt_properties.split_master_weight_for_bf16 = False
        if dtype in (torch.bfloat16, torch.float16):
            # convert optimizer for training case.
            (
                optimized_model,
                optimized_optimizer,
                params_attr,
            ) = utils._weight_cast.weight_dtype_convert_with_ipex(
                optimized_model,
                optimized_optimizer,
                params_attr,
                opt_properties.split_master_weight_for_bf16,
                dtype,
            )
    # Since TorchDynamo cannot handle custom operations yet, for the case of inference graph mode,
    # the weights prepacking here is temporarily cancelled, and it will be completed on the graph.
    if opt_properties.weights_prepack and device_type == "cpu":
        if dtype == torch.bfloat16:
            assert core.onednn_has_bf16_support(), (
                "BF16 weight prepack needs the cpu support avx512bw, avx512vl and avx512dq, "
                + "please set dtype to torch.float or set weights_prepack to False."
            )
        if dtype == torch.half:
            assert core.onednn_has_fp16_support(), (
                "FP16 weight prepack needs the cpu support avx512_core_fp16, "
                + "please set dtype to torch.float or set weights_prepack to False."
            )
        (
            optimized_model,
            optimized_optimizer,
            params_attr,
        ) = weight_prepack_with_ipex(
            optimized_model, optimized_optimizer, params_attr, "cpu"
        )
        # Let TorchDynamo trace through the IPEX replacement modules.
        torch._dynamo.allow_in_graph(_IPEXConv2d)
        torch._dynamo.allow_in_graph(_IPEXConvTranspose2d)
        torch._dynamo.allow_in_graph(_IPEXLinear)
        torch._dynamo.allow_in_graph(_LSTM)
    # Phase 9: optional graph capture wraps the model's forward.
    if opt_properties.graph_mode:
        _old_forward = optimized_model.forward
        wrapper = GraphCapture(
            optimized_model,
            optimizer is not None,
            dtype,
            opt_properties.weights_prepack,
        )
        optimized_model.forward = wrapper(_old_forward)
    if optimizer is None:
        return optimized_model
    # with an optimizer
    if opt_properties.fuse_update_step:
        optimized_optimizer = optimizer_fusion(
            optimized_optimizer,
            opt_properties.split_master_weight_for_bf16,
            device_type,
        )
    return optimized_model, optimized_optimizer
def _convert_convNd_deconvNd_weight_memory_format(module):
# inspired from https://github.com/pytorch/pytorch/blob/master/torch/nn/utils/memory_format.py
if isinstance(module, (torch.nn.Conv1d, torch.nn.ConvTranspose1d)):
weight_data = to_channels_last_1d(module.weight.detach().clone())
module.weight.data = weight_data.resize_(weight_data.size())
elif isinstance(module, (torch.nn.Conv2d, torch.nn.ConvTranspose2d)):
weight_data = (
module.weight.detach().clone().contiguous(memory_format=torch.channels_last)
)
module.weight.data = weight_data.resize_(
weight_data.size(), memory_format=torch.channels_last
)
elif isinstance(module, (torch.nn.Conv3d, torch.nn.ConvTranspose3d)):
weight_data = (
module.weight.detach()
.clone()
.contiguous(memory_format=torch.channels_last_3d)
)
module.weight.data = weight_data.resize_(
weight_data.size(), memory_format=torch.channels_last_3d
)
for child in module.children():
_convert_convNd_deconvNd_weight_memory_format(child)
class FP32MathMode(IntEnum):
    # Python-side mirror of the native ``core.FP32MathMode`` enum; values are
    # taken from the C++ binding so the two always stay in sync.
    FP32 = int(core.FP32MathMode.FP32)  # implicit down-conversion disabled (default)
    TF32 = int(core.FP32MathMode.TF32)  # implicit FP32 -> TF32 (GPU only, per docs below)
    BF32 = int(core.FP32MathMode.BF32)  # implicit FP32 -> BF16 via oneDNN fpmath mode
def set_fp32_math_mode(mode=FP32MathMode.FP32, device="cpu"):
    r"""
    Enable or disable implicit data type conversion.

    Args:
        mode (FP32MathMode): ``FP32MathMode.FP32``, ``FP32MathMode.BF32`` or
            ``FP32MathMode.TF32`` (GPU ONLY). oneDNN fpmath mode will be disabled by default if dtype
            is set to ``FP32MathMode.FP32``. The implicit ``FP32`` to ``TF32`` data type conversion
            will be enabled if dtype is set to ``FP32MathMode.TF32``. The implicit ``FP32``
            to ``BF16`` data type conversion will be enabled if dtype is set to ``FP32MathMode.BF32``.
        device (string): ``cpu``, ``xpu``

    Raises:
        RuntimeError: if ``device`` is neither ``"cpu"`` nor ``"xpu"``.

    Examples:

        >>> import intel_extension_for_pytorch as ipex
        >>> # to enable the implicit data type conversion
        >>> ipex.set_fp32_math_mode(device="xpu", mode=ipex.FP32MathMode.BF32)
        >>> # to disable the implicit data type conversion
        >>> ipex.set_fp32_math_mode(device="xpu", mode=ipex.FP32MathMode.FP32)

    ``torch.xpu.set_fp32_math_mode()`` is an alternative function in Intel® Extension for PyTorch*,
    to provide identical usage for XPU device only. The motivation of adding this alias is
    to unify the coding style in user scripts base on ``torch.xpu`` modular.

    Examples:

        >>> import intel_extension_for_pytorch as ipex
        >>> # to enable the implicit data type conversion
        >>> torch.xpu.set_fp32_math_mode(device="xpu", mode=ipex.FP32MathMode.BF32)
        >>> # to disable the implicit data type conversion
        >>> torch.xpu.set_fp32_math_mode(device="xpu", mode=ipex.FP32MathMode.FP32)
    """
    if device == "cpu":
        if mode == FP32MathMode.BF32:
            core.set_fp32_math_mode(core.FP32MathMode.BF32)
        elif mode == FP32MathMode.FP32:
            core.set_fp32_math_mode(core.FP32MathMode.FP32)
        else:
            # Fixed: implicit string concatenation instead of a backslash
            # continuation, which embedded source indentation in the message.
            warnings.warn(
                "For CPU device, IPEX does not support mode except "
                "FP32MathMode.FP32 and FP32MathMode.BF32 for fpmath_mode right now."
            )
    elif device == "xpu":
        if mode == FP32MathMode.BF32:
            torch.xpu.set_fp32_math_mode(torch.xpu.FP32MathMode.BF32)
        elif mode == FP32MathMode.FP32:
            torch.xpu.set_fp32_math_mode(torch.xpu.FP32MathMode.FP32)
        elif mode == FP32MathMode.TF32:
            torch.xpu.set_fp32_math_mode(torch.xpu.FP32MathMode.TF32)
        else:
            warnings.warn(
                "For XPU device, IPEX does not support mode except "
                "FP32MathMode.FP32, FP32MathMode.BF32 and FP32MathMode.TF32 for fpmath_mode right now."
            )
    else:
        raise RuntimeError(
            "Unexpected device type {}. ".format(device) + "Supported are 'cpu', 'xpu'."
        )
def get_fp32_math_mode(device="cpu"):
    r"""
    Get the current fpmath_mode setting.

    Args:
        device (string): ``cpu``, ``xpu``

    Returns:
        Fpmath mode: one of ``FP32MathMode.FP32``, ``FP32MathMode.BF32`` or
        ``FP32MathMode.TF32`` (GPU ONLY). ``FP32MathMode.FP32`` means oneDNN
        fpmath mode is disabled; ``FP32MathMode.TF32`` / ``FP32MathMode.BF32``
        mean implicit ``FP32`` to ``TF32`` / ``BF16`` conversion is enabled.

    Raises:
        RuntimeError: if ``device`` is neither ``"cpu"`` nor ``"xpu"``.

    Examples:

        >>> import intel_extension_for_pytorch as ipex
        >>> # to get the current fpmath mode
        >>> ipex.get_fp32_math_mode(device="xpu")

    ``torch.xpu.get_fp32_math_mode()`` is an XPU-only alias provided so user
    scripts can keep a consistent ``torch.xpu`` coding style:

        >>> torch.xpu.get_fp32_math_mode(device="xpu")
    """
    if device == "cpu":
        return core.get_fp32_math_mode()
    if device == "xpu":
        return torch.xpu.get_fp32_math_mode()
    raise RuntimeError(
        f"Unexpected device type {device}. Supported are 'cpu', 'xpu'."
    )
| 31,056 | 42.375698 | 119 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/nn/modules/weight_only_quantization.py | import torch
import torch.ao.nn.quantized as nnq
from torch.ao.nn.quantized.modules.utils import _quantize_weight
import torch.ao.nn.intrinsic as nni
from ...quantization._qconfig import get_weight_only_quant_qconfig_mapping
class IpexWoqLinear(nnq.Linear):
    r"""
    A weight-only quantized (WOQ) linear module with floating point tensor as inputs and outputs.
    Weight is dequantized at runtime for computation.
    We adopt the same interface as `torch.nn.Linear`, please see
    https://pytorch.org/docs/stable/nn.html#torch.nn.Linear for documentation.

    Similar to :class:`torch.nn.Linear`, attributes will be randomly
    initialized at module creation time and will be overwritten later

    Attributes:
        weight (Tensor): the non-learnable quantized weights of the module which are of
                         shape :math:`(\text{out\_features}, \text{in\_features})`.
        bias (Tensor): the non-learnable floating point bias of the module of shape
                       :math:`(\text{out\_features})`. If :attr:`bias` is ``True``,
                       the values are initialized to zero.

    Examples::
        >>> # xdoctest: +SKIP
        >>> m = ipex.nn.IpexWoqLinear(20, 30)
        >>> input = torch.randn(128, 20)
        >>> output = m(input)
        >>> print(output.size())
        torch.Size([128, 30])
    """

    # version used in this class is different from the parent class nnq.Linear
    _version = 4

    def __init__(self, in_features, out_features, bias_=True, dtype=torch.qint8):
        # nnq.Linear does not support quint4x2 so we set qint8 here as a hack
        # This dtype is used for weight prepacking and we do not rely on the prepacking
        # of nnq.Linear. So, it won't affect our implementation here.
        super().__init__(in_features, out_features, bias_, dtype=torch.qint8)
        # Prepack a random weight/bias pair so the module is usable (and
        # serializable) before real parameters are loaded.
        weight = torch.rand(out_features, in_features)
        qweight = torch.quantize_per_channel(
            weight, torch.ones(out_features), torch.zeros(out_features), 0, dtype
        )
        bias = torch.rand(out_features)
        self._op_context = torch.ops.ipex_prepack.weight_only_qlinear_prepack(
            qweight, bias, None
        )
        # NOTE: nnq.Linear exposes weight()/bias() as *methods*, not attributes.
        self.weight_qscheme = self.weight().qscheme()
        del weight
        del qweight

    def forward(self, x):
        """Run the fused WOQ linear kernel; output is cast back to x's dtype."""
        # Note that we can handle self.bias == None case.
        if self._packed_params.dtype in [torch.qint8, torch.quint4x2]:
            Y = torch.ops.torch_ipex.ipex_woq_linear(
                x, self._op_context.get_data_handle()
            )
        else:
            # Fixed typo in the error message ("wegiht" -> "weight").
            raise RuntimeError("Unsupported dtype of weight only quantized linear!")
        return Y.to(x.dtype)

    def _get_name(self):
        return "IpexWeightOnlyQuantizedLinear"

    def extra_repr(self):
        extra_repr_str = "in_features={}, out_features={}, dtype={}".format(
            self.in_features, self.out_features, self._packed_params.dtype
        )
        if self._packed_params.dtype in [torch.qint8, torch.quint4x2]:
            extra_repr_str += ", qscheme={}".format(self.weight_qscheme)
        return extra_repr_str

    def _save_to_state_dict(self, destination, prefix, keep_vars):
        # Fixed: the assert message referenced _IPEXConvNd (copy-paste), and
        # weight/bias were read as attributes although they are methods here.
        assert (
            not keep_vars
        ), "can not use keep_vars=True when saving IpexWoqLinear's parameters"
        if self.bias() is not None:
            bias = self.bias().float()
            destination[prefix + "bias"] = bias.detach()
        weight = self.weight().float()
        # Fixed: the original used ``self.ctx``, which is never defined; the
        # prepack context is stored as ``self._op_context`` in __init__.
        # NOTE(review): assumes the op context exposes ``to_public`` -- confirm
        # against the ipex_prepack op implementation.
        destination[prefix + "weight"] = self._op_context.to_public(weight.detach())

    def _load_from_state_dict(
        self,
        state_dict,
        prefix,
        local_metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
    ):
        with torch.no_grad():
            w_name = prefix + "weight"
            b_name = prefix + "bias"
            fp32_loaded_weight = state_dict[w_name]
            # Fixed: weight()/bias() are methods; calling them yields tensors.
            loaded_weight = fp32_loaded_weight.to(self.weight().dtype)
            if b_name in state_dict:
                loaded_bias = state_dict[b_name]
                loaded_bias = loaded_bias.to(self.bias().dtype)
            else:
                loaded_bias = None
            # Re-prepack with the loaded tensors, replacing the random init.
            self._op_context = torch.ops.ipex_prepack.weight_only_qlinear_prepack(
                loaded_weight, loaded_bias, None
            )

    @classmethod
    def from_float(cls, mod):
        r"""Create a weight-only quantized module from a float module or qparams_dict

        Args:
            mod (Module): a float module, either produced by torch.ao.quantization
                          utilities or provided by the user
        """
        float_modules = [torch.nn.Linear]
        assert (
            type(mod) in float_modules
        ), "IpexWoqLinear.from_float only works for one of" + str(
            [float_mod.__name__ for float_mod in float_modules]
        )
        assert hasattr(mod, "qconfig"), "Input float module must have qconfig defined"
        # NOTE(review): this branch is unreachable while float_modules only
        # contains torch.nn.Linear (the assert above rejects LinearReLU).
        if type(mod) == nni.LinearReLU:
            mod = mod[0]
        if mod.qconfig is not None and mod.qconfig.weight is not None:
            weight_observer = mod.qconfig.weight()
        else:
            # Fall back to the default weight-only quant qconfig.
            weight_observer = (
                get_weight_only_quant_qconfig_mapping().global_qconfig.weight()
            )
        dtype = weight_observer.dtype
        assert dtype in [torch.qint8, torch.quint4x2], (
            "The only supported dtypes for "
            "weight-only quantized linear are qint8 and quint4x2 got: {}".format(dtype)
        )
        weight_observer(mod.weight)
        if dtype in [torch.qint8, torch.quint4x2]:
            qweight = _quantize_weight(mod.weight.float(), weight_observer)
        else:
            raise RuntimeError(
                "Unsupported dtype specified for dynamic quantized Linear!"
            )
        qlinear = cls(mod.in_features, mod.out_features, dtype=dtype)
        qlinear._op_context = torch.ops.ipex_prepack.weight_only_qlinear_prepack(
            qweight, mod.bias, None
        )
        qlinear.weight_qscheme = qlinear.weight().qscheme()
        del qweight
        return qlinear

    @classmethod
    def from_reference(cls, ref_qlinear):
        """Create a weight-only quantized module from a reference quantized module

        Args:
            ref_qlinear (Module): a reference quantized module, either produced by
                torch.ao.quantization functions or provided by the user
        """
        qlinear = cls(
            ref_qlinear.in_features,
            ref_qlinear.out_features,
            dtype=ref_qlinear.weight_dtype,
        )
        qweight = ref_qlinear.get_quantized_weight()
        bias = ref_qlinear.bias
        qlinear._op_context = torch.ops.ipex_prepack.weight_only_qlinear_prepack(
            qweight, bias, None
        )
        qlinear.weight_qscheme = qlinear.weight().qscheme()
        return qlinear
| 6,951 | 39.184971 | 97 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/nn/modules/merged_embeddingbag.py | import torch
from torch import Tensor, nn
from torch.autograd import Function
from typing import List, Optional, NamedTuple
from itertools import accumulate
import enum
# Per-bag reduction mode; the integer codes are what the torch_ipex
# merged-embeddingbag kernels expect.
PoolingMode = enum.IntEnum("PoolingMode", [("SUM", 0), ("MEAN", 1)])
# Hyper-parameters handed to the fused SGD backward kernel; ``bf16_trail``
# holds the low-half tensors used by the split-SGD BF16 optimization.
SGDArgs = NamedTuple(
    "SGDArgs",
    [
        ("bf16_trail", List[Optional[torch.Tensor]]),
        ("weight_decay", float),
        ("lr", float),
    ],
)
# Static description of one embedding table inside a merged bag.
EmbeddingSpec = NamedTuple(
    "EmbeddingSpec",
    [
        ("num_of_features", int),
        ("feature_size", int),
        ("pooling_modes", str),
        ("dtype", torch.dtype),
        ("weight", Optional[torch.Tensor]),
        ("sparse", bool),
    ],
)
def merged_embeddingbag(
    indices, offsets, indices_with_row_offsets, row_offsets, pooling_modes, *weights
):
    """Functional entry point for MergedEmbeddingBag.

    Routes through the autograd Function while gradients are being recorded;
    otherwise calls the fused inference-only forward kernel directly.
    """
    if not torch.is_grad_enabled():
        return torch.ops.torch_ipex.merged_embeddingbag_forward(
            indices, offsets, weights, pooling_modes
        )
    return MergedEmbeddingBagFunc.apply(
        indices,
        offsets,
        indices_with_row_offsets,
        row_offsets,
        pooling_modes,
        *weights
    )
def merged_embeddingbag_sgd(
    indices,
    offsets,
    indices_with_row_offsets,
    row_offsets,
    pooling_modes,
    sgd_args,
    *weights
):
    """Functional entry point for MergedEmbeddingBagWithSGD.

    Under grad mode the autograd Function (with the fused SGD update in its
    backward) is used; otherwise the inference-only forward kernel runs.
    """
    if not torch.is_grad_enabled():
        return torch.ops.torch_ipex.merged_embeddingbag_forward(
            indices, offsets, weights, pooling_modes
        )
    return MergedEmbeddingBagSGDFunc.apply(
        indices,
        offsets,
        indices_with_row_offsets,
        row_offsets,
        pooling_modes,
        sgd_args,
        *weights
    )
class MergedEmbeddingBagFunc(Function):
    """Autograd bridge for MergedEmbeddingBag with dense weight gradients."""

    @staticmethod
    def unpack(*args):
        # forward/backward must hand autograd a tuple; this re-packs a sequence.
        return args

    @staticmethod
    def forward(
        ctx,
        indices,
        offsets,
        indices_with_row_offsets,
        row_offsets,
        pooling_modes,
        *weights
    ):
        output = torch.ops.torch_ipex.merged_embeddingbag_forward(
            indices, offsets, weights, pooling_modes
        )
        # Stash everything the backward kernel needs on the ctx.
        ctx.offsets = offsets
        ctx.weights = weights
        ctx.indices_with_row_offsets = indices_with_row_offsets
        ctx.row_offsets = row_offsets
        ctx.pooling_modes = pooling_modes
        return MergedEmbeddingBagFunc.unpack(*output)

    @staticmethod
    def backward(ctx, *grad_out):
        grad_list = torch.ops.torch_ipex.merged_embeddingbag_backward_cpu(
            grad_out,
            ctx.offsets,
            ctx.weights,
            ctx.indices_with_row_offsets,
            ctx.row_offsets,
            ctx.pooling_modes,
        )
        # One gradient slot per forward() input: the five non-weight arguments
        # get None, followed by one gradient per weight tensor.
        # (Removed an unused ``n_tables`` local from the original.)
        output = [None] * 5 + list(grad_list)
        return MergedEmbeddingBagFunc.unpack(*output)
class MergedEmbeddingBagSGDFunc(Function):
    """Autograd bridge that fuses the SGD weight update into backward."""

    @staticmethod
    def unpack(*args):
        # Re-pack a sequence into the tuple autograd expects.
        return args

    @staticmethod
    def forward(
        ctx,
        indices,
        offsets,
        indices_with_row_offsets,
        row_offsets,
        pooling_modes,
        sgd_args,
        *weights
    ):
        output = torch.ops.torch_ipex.merged_embeddingbag_forward(
            indices, offsets, weights, pooling_modes
        )
        # Keep every argument the fused backward/update kernel will need.
        ctx.indices = indices
        ctx.offsets = offsets
        ctx.weights = weights
        ctx.indices_with_row_offsets = indices_with_row_offsets
        ctx.row_offsets = row_offsets
        ctx.pooling_modes = pooling_modes
        ctx.sgd_args = sgd_args
        return MergedEmbeddingBagSGDFunc.unpack(*output)

    @staticmethod
    def backward(ctx, *grad_out):
        sgd_args = ctx.sgd_args
        # The kernel applies the SGD step to the weights in place ...
        torch.ops.torch_ipex.merged_embeddingbag_backward_sgd(
            grad_out,
            ctx.indices,
            ctx.offsets,
            ctx.weights,
            ctx.indices_with_row_offsets,
            ctx.row_offsets,
            ctx.pooling_modes,
            sgd_args.bf16_trail,
            sgd_args.weight_decay,
            sgd_args.lr,
        )
        # ... so no gradient flows back: one None per forward() input
        # (6 non-weight arguments plus one per weight tensor).
        grads = [None] * (len(ctx.weights) + 6)
        return MergedEmbeddingBagSGDFunc.unpack(*grads)
class MergedEmbeddingBag(nn.Module):
    r"""
    Merge multiple Pytorch `EmbeddingBag <https://pytorch.org/docs/stable/generated/torch.nn.EmbeddingBag.html
    #embeddingbag>`_ objects into a single `torch.nn.Module` object.

    At the current stage:

    `MergedEmbeddingBag` assumes to be constructed from `nn.EmbeddingBag` with `sparse=False`, returns dense gradients.

    `MergedEmbeddingBagWithSGD` does not return gradients, backward step and weights update step are fused.

    Native usage of multiple `EmbeddingBag` objects is:

        >>> EmbLists = torch.nn.Modulist(emb1, emb2, emb3, ..., emb_m)
        >>> inputs = [in1, in2, in3, ..., in_m]
        >>> outputs = []
        >>> for i in range(len(EmbLists)):
        >>>     outputs.append(Emb[in_i])

    The optimized path is:

        >>> EmbLists = torch.nn.Modulist(emb1, emb2, emb3, ..., emb_m)
        >>> merged_emb = MergedEmbeddingBagWithSGD.from_embeddingbag_list(EmbLists)
        >>> outputs = MergedEmbeddingBagWithSGD(inputs)

    Computation benefits from the optimized path:

        1). Pytorch OP dispatching overhead is minimized. If `EmbeddingBag` operations are not heavy, this dispatching
        overhead brings big impact.

        2). Parallelizations over embedding tables are merged into that over a single merged embedding table. This
        could benefit low parallelization efficiency scenarios when data size read out from embedding tables are not
        large enough.

    A `linearize_indices_and_offsets` step is introduced to merge indices/offsets together. Consider that `EmbeddingBag`
    objects are usually the first layer of a model, the `linearize_indices_and_offsets` step can be considered as "data
    preprocess" and can be done offline. See usage of the `linearize_indices_and_offsets` in `MergedEmbeddingBagWithSGD`.

    Now `MergedEmbeddingBagWithSGD` is the only option running with an optimizer. We plan to add more optimizer support
    in the future. Visit `MergedEmbeddingBagWithSGD` for introduction of `MergedEmbeddingBagWith[Optimizer]`.
    """

    embedding_specs: List[EmbeddingSpec]

    def __init__(
        self,
        embedding_specs: List[EmbeddingSpec],
    ):
        super(MergedEmbeddingBag, self).__init__()
        self.n_tables = len(embedding_specs)
        row_offsets = []
        self.pooling_modes = []
        self.dtypes = []
        self.alldense = True
        # (Removed dead ``self.weights = []`` / ``feature_sizes`` / ``dtype``
        # initializations from the original.)
        self.weights = torch.nn.ParameterList(
            [nn.Parameter(torch.Tensor()) for i in range(len(embedding_specs))]
        )
        for i, emb in enumerate(embedding_specs):
            num_of_features, feature_size, mode, dtype, weight, sparse = emb
            row_offsets.append(num_of_features)
            if mode == "sum":
                self.pooling_modes.append(PoolingMode.SUM)
            elif mode == "mean":
                self.pooling_modes.append(PoolingMode.MEAN)
            else:
                # Fixed: the original built ``AssertionError(False)`` in a tuple
                # expression without raising it, silently accepting bad modes.
                raise AssertionError(
                    "MergedEmbeddingBag only support EmbeddingBag with mode sum or mean"
                )
            if weight is None:
                weight = torch.empty((num_of_features, feature_size), dtype=dtype)
            self.weights[i] = nn.Parameter(weight)
            if sparse:
                self.alldense = False
        # row_offsets[i] is the first global row index of table i; the last
        # entry is the total number of rows across all tables.
        self.register_buffer(
            "row_offsets",
            torch.tensor([0] + list(accumulate(row_offsets)), dtype=torch.int64),
        )

    @classmethod
    def from_embeddingbag_list(
        cls,
        tables: List[torch.nn.EmbeddingBag],
    ):
        """Build a MergedEmbeddingBag from existing dense-gradient EmbeddingBags."""
        embedding_specs = []
        for emb in tables:
            emb_shape = emb.weight.shape
            assert not emb.sparse, (
                "MergedEmbeddingBag can only be used for dense gradient EmebddingBag. "
                "Please use MergedEmbeddingBagWith[Optimizer] for sparse gradient."
            )
            embedding_specs.append(
                EmbeddingSpec(
                    num_of_features=emb_shape[0],
                    feature_size=emb_shape[1],
                    pooling_modes=emb.mode,
                    dtype=emb.weight.dtype,
                    weight=emb.weight.detach(),
                    sparse=emb.sparse,
                )
            )
        return cls(embedding_specs)

    def extra_repr(self) -> str:
        s = "number of tables={}\n".format(self.n_tables)
        for i in range(self.n_tables):
            s += "table{}: {}, {}, {}, {}".format(
                i,
                self.weights[i].shape[0],
                self.weights[i].shape[1],
                self.pooling_modes[i],
                self.weights[i].dtype,
            )
            if i != self.n_tables - 1:
                s += "\n"
        return s

    def linearize_indices_and_offsets(
        self,
        indices: List[Tensor],
        offsets: List[Optional[Tensor]],
        include_last_offsets: List[bool],
    ):
        r"""
        To make backward/update more balance, we only have 1 logical table in MergedEmbedingBag and
        use unified indices for access the whole logical table.

        We need to re-mark the indice from different tables to distinguish them.
        For example, we have 2 tables with shape [200, 128] and [100, 128].
        The indice 50 for table1 is still 50 and the indice 50 for table2 should be set to 50 + 200 = 250.

        We assume the original indice and offset will follow the usage for Pytorch EmbeddingBag:
        https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/sparse.py#L355-L382
        """

        # TODO: support per_sample_weights in forward
        def get_batch_size(indice, offset, include_last_offset):
            # 2-D indices imply fixed-size bags and no offsets tensor.
            if indice.dim() == 2:
                assert offset is None, (
                    "offset should be None if indice is 2-D tensor, "
                    "https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/sparse.py#L355-L382"
                )
                batch_size = indice.shape[0]
            else:
                batch_size = offset.numel()
                if include_last_offset:
                    batch_size -= 1
            return batch_size

        assert self.n_tables == len(indices), "expected {} but got {} indices".format(
            self.n_tables, len(indices)
        )
        assert self.n_tables == len(offsets), "expected {} but got {} offsets".format(
            self.n_tables, len(offsets)
        )
        assert self.n_tables == len(
            include_last_offsets
        ), "expected {} but got {} include_last_offsets".format(
            self.n_tables, len(include_last_offsets)
        )

        batch_size = get_batch_size(indices[0], offsets[0], include_last_offsets[0])
        assert all(
            batch_size == get_batch_size(idx, offset, include_last)
            for idx, offset, include_last in zip(indices, offsets, include_last_offsets)
        ), r"MergedEmbeddingBag only support input with same batch size"

        n_indices = sum([t.numel() for t in indices])
        n_offsets = batch_size * self.n_tables + 1  # include last offset
        merged_indices = torch.empty(n_indices, dtype=torch.int64)
        merged_indices_with_row_offsets = torch.empty(
            n_indices, dtype=torch.int64
        )  # used for sort together
        merged_offsets = torch.empty(n_offsets, dtype=torch.int64)
        idx_start = 0
        offset_start = 0
        for i in range(self.n_tables):
            n_indice = indices[i].numel()
            merged_indices[idx_start : idx_start + n_indice].copy_(indices[i].view(-1))
            # Shift table-local indices into the global row space.
            merged_indices_with_row_offsets[idx_start : idx_start + n_indice].copy_(
                indices[i].view(-1) + self.row_offsets[i]
            )
            if indices[i].dim() == 2:
                bag_size = indices[i].shape[1]
                offset = torch.arange(0, indices[i].numel(), bag_size)
            else:
                offset = offsets[i][:-1] if include_last_offsets[i] else offsets[i]
            assert offset.numel() == batch_size
            merged_offsets[offset_start : offset_start + batch_size].copy_(
                offset + idx_start
            )
            idx_start += n_indice
            offset_start += batch_size
        assert idx_start == n_indices
        assert offset_start == n_offsets - 1
        merged_offsets[-1] = n_indices
        return (merged_indices, merged_offsets, merged_indices_with_row_offsets)

    def forward(
        self, input, need_linearize_indices_and_offsets=torch.BoolTensor([True])
    ):
        r"""
        Args:
            input (Tuple[Tensor]): a tuple of (indices, offsets, \
                include_last_offsets(if not merged)/indices_with_row_offsets(if merged))
            need_linearize_indices_and_offsets: indicate whether input need to be linearized

        Returns:
            List[Tensor] output shape of `(batch_size, feature_size)` which length = num of tables.
        """
        assert self.alldense, (
            "MergedEmbeddingBag only support EmbeddingBag List with all dense "
            "gradient, please use MergedEmbeddingBagWith[Optimizer] for sparse "
            "gradient EmbeddingBag"
        )
        if need_linearize_indices_and_offsets.item():
            indices, offsets, include_last_offsets = input
            (
                indices,
                offsets,
                indices_with_row_offsets,
            ) = self.linearize_indices_and_offsets(
                indices, offsets, include_last_offsets
            )
        else:
            indices, offsets, indices_with_row_offsets = input
        return merged_embeddingbag(
            indices,
            offsets,
            indices_with_row_offsets,
            self.row_offsets,
            self.pooling_modes,
            *self.weights
        )
class MergedEmbeddingBagWithSGD(MergedEmbeddingBag):
    r"""
    To support training with `MergedEmbeddingBag` for good performance, optimizer step is fused with backward function.

    Native usage for multiple EmbeddingBag is:

        >>> EmbLists = torch.nn.Modulist(emb1, emb2, emb3, ..., emb_m)
        >>> sgd = torch.optim.SGD(EmbLists.parameters(), lr=lr, weight_decay=weight_decay)
        >>> inputs = [in1, in2, in3, ..., in_m]
        >>> outputs = []
        >>> for i in range(len(EmbLists)):
        >>>     outputs.append(Emb[in_i])
        >>> sgd.zero_grad()
        >>> for i in range(len(outputs)):
        >>>     out.backward(grads[i])
        >>> sgd.step()

    The optimized path is:

        >>> # create MergedEmbeddingBagWithSGD module with optimizer args (lr and weight decay)
        >>> EmbLists = torch.nn.Modulist(emb1, emb2, emb3, ..., emb_m)
        >>> merged_emb = MergedEmbeddingBagWithSGD.from_embeddingbag_list(EmbLists, lr=lr, weight_decay=weight_decay)
        >>> # if you need to train with BF16 dtype, we provide split sgd on it
        >>> # merged_emb.to_bfloat16_train()
        >>> merged_input = merged_emb.linearize_indices_and_offsets(inputs)
        >>> outputs = MergedEmbeddingBagWithSGD(merged_input, need_linearize_indices_and_offsets=torch.BoolTensor([False]))
        >>> outputs.backward(grads)

    Training benefits further from this optimization:

        1). Pytorch OP dispatching overhead in backward and weight update process is saved.

        2). Thread loading becomes more balanced during backward/weight update. In real world scenarios, `Embedingbag`
        are often used to represent categorical features, while the categorical features often fit power law
        distribution. For example, if we use one embedding table to represent the age range of a video game website
        users, we might find most of them are between 10-19 or 20-29. So we may need to update the row which represent
        10-19 or 20-29 frequently. Since updating these rows needs to write at the same memory address, we need to write
        it by 1 thread (otherwise we will have write conflict or overhead to solve the conflict). The potential memory
        write conflict can be simply addressed by merging multiple tables together.

        3). Weights update is fused with backward together. We can immediately update the weight right after we get
        gradients from the backward step and thus the memory access pattern becomes more friendly. Data access will
        happen on cache more than on memory.
    """

    embedding_specs: List[EmbeddingSpec]

    def __init__(
        self,
        embedding_specs: List[EmbeddingSpec],
        lr: float = 0.01,
        weight_decay: float = 0,
    ):
        super(MergedEmbeddingBagWithSGD, self).__init__(embedding_specs)
        self.sgd_args = self.init_sgd_args(lr, weight_decay)
        for i in range(self.n_tables):
            weight = self.weights[i]
            if weight.dtype == torch.bfloat16:
                # BF16 weights train with split SGD: keep a zero trail tensor
                # holding the low half of the FP32 master weight.
                self.sgd_args.bf16_trail.append(
                    torch.zeros_like(weight, dtype=torch.bfloat16)
                )
            else:
                # Placeholder so bf16_trail stays index-aligned with weights.
                self.sgd_args.bf16_trail.append(torch.empty(0, dtype=torch.bfloat16))

    def init_sgd_args(self, lr, weight_decay, bf16_trail=None):
        """Validate SGD hyper-parameters and pack them into an SGDArgs tuple.

        Raises:
            ValueError: if ``lr`` or ``weight_decay`` is negative.
        """
        if bf16_trail is None:
            bf16_trail = []
        if lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        return SGDArgs(weight_decay=weight_decay, lr=lr, bf16_trail=bf16_trail)

    def to_bfloat16_train(self):
        r"""
        Cast weight to bf16 and it's trail part for training
        """
        trails = []
        for i in range(len(self.weights)):
            if self.weights[i].dtype == torch.float:
                bf16_w, trail = torch.ops.torch_ipex.split_float_bfloat16(
                    self.weights[i]
                )
            elif self.weights[i].dtype == torch.bfloat16:
                bf16_w = self.weights[i]
                trail = torch.zeros_like(bf16_w, dtype=torch.bfloat16)
            elif self.weights[i].dtype == torch.double:
                bf16_w, trail = torch.ops.torch_ipex.split_float_bfloat16(
                    self.weights[i].float()
                )
            else:
                # Fixed: the original constructed ``AssertionError(False)``
                # without raising it, then crashed on the undefined ``trail``.
                raise AssertionError(
                    "MergedEmbeddingBag only support dtypes with bfloat, float and double"
                )
            trails.append(trail)
            self.weights[i] = torch.nn.Parameter(bf16_w)
        self.sgd_args = self.sgd_args._replace(bf16_trail=trails)

    def forward(
        self, input, need_linearize_indices_and_offsets=torch.BoolTensor([True])
    ):
        r"""
        Args:
            input (Tuple[Tensor]): a tuple of (indices, offsets, \
                include_last_offsets(if not merged)/indices_with_row_offsets(if merged))
            need_linearize_indices_and_offsets: indicate whether input need to be linearized

        Returns:
            List[Tensor] output shape of `(batch_size, feature_size)` which length = num of tables.
        """
        if need_linearize_indices_and_offsets.item():
            indices, offsets, include_last_offsets = input
            (
                indices,
                offsets,
                indices_with_row_offsets,
            ) = self.linearize_indices_and_offsets(
                indices, offsets, include_last_offsets
            )
        else:
            indices, offsets, indices_with_row_offsets = input
        return merged_embeddingbag_sgd(
            indices,
            offsets,
            indices_with_row_offsets,
            self.row_offsets,
            self.pooling_modes,
            self.sgd_args,
            *self.weights
        )

    @classmethod
    def from_embeddingbag_list(
        cls,
        tables: List[torch.nn.EmbeddingBag],
        lr: float = 0.01,
        weight_decay: float = 0,
    ):
        """Build a merged bag (sparse EmbeddingBags allowed) with fused SGD."""
        embedding_specs = []
        for emb in tables:
            emb_shape = emb.weight.shape
            embedding_specs.append(
                EmbeddingSpec(
                    num_of_features=emb_shape[0],
                    feature_size=emb_shape[1],
                    pooling_modes=emb.mode,
                    dtype=emb.weight.dtype,
                    weight=emb.weight.detach(),
                    sparse=emb.sparse,
                )
            )
        return cls(embedding_specs, lr, weight_decay)
| 21,015 | 36.7307 | 123 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/nn/utils/_model_convert.py | import torch
from ._parameter_wrapper import get_shared_parameter_status, patch_state_dict
def replace_dropout_with_identity(model):
    """Swap every ``nn.Dropout`` child for ``nn.Identity`` on an eval-mode model.

    Dropout is a no-op at inference time; removing it keeps ``aten::dropout``
    off the JIT graph, which may expose more fusion opportunities. Scripted
    modules and models still in training mode are left untouched.
    """
    if isinstance(model, torch.jit.ScriptModule) or model.training:
        return
    for child_name, child in model.named_children():
        if isinstance(child, torch.nn.Dropout):
            setattr(model, child_name, torch.nn.Identity())
        else:
            replace_dropout_with_identity(child)
def convert_model_data_type(model, dtype):
    """Cast castable parameters of ``model`` to ``dtype`` for inference.

    Casting the weights (and biases) once up front avoids per-run dtype
    reorders. Returns the ``(params_attr, model)`` pair, where ``params_attr``
    maps each parameter to its ParameterWrapper bookkeeping entry.
    """
    # convert weights(bias) of model to dtype to reduce dtype reorder
    assert dtype in [
        torch.bfloat16,
        torch.float16,
    ], "model convert only support bf16 and fp16"
    params_attr = {}
    get_shared_parameter_status(model, params_attr)
    for _, param in model.named_parameters():
        if param is None:
            continue
        wrapper = params_attr[param]
        if wrapper.can_cast_inference(dtype):
            wrapper.cast_for_inference(dtype)
    patch_state_dict(model, params_attr, "inference")
    return params_attr, model
| 1,250 | 33.75 | 102 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/nn/utils/_parameter_wrapper.py | import torch
from typing import Set
import functools
import contextlib
import types
import warnings
from intel_extension_for_pytorch.cpu._auto_kernel_selection import _using_dnnl
from intel_extension_for_pytorch import frontend
from intel_extension_for_pytorch.nn.utils._weight_prepack import (
_IPEXLinear,
_IPEXConv1d,
_IPEXConv2d,
_IPEXConv3d,
_IPEXConvTranspose2d,
_IPEXConvTranspose3d,
_IPEXLinearAllreduce,
may_import_deepspeed_modules,
)
@functools.lru_cache(None)
def IPEX_WEIGHT_PREPACK_MODULE_CPU():
    """Map stock modules to their IPEX prepacked CPU counterparts (cached)."""
    mapping = {
        torch.nn.Linear: _IPEXLinear,
        torch.nn.Conv2d: _IPEXConv2d,
        torch.nn.Conv3d: _IPEXConv3d,
        torch.nn.Conv1d: _IPEXConv1d,
        torch.nn.ConvTranspose2d: _IPEXConvTranspose2d,
        torch.nn.ConvTranspose3d: _IPEXConvTranspose3d,
    }
    deepspeed_modules = may_import_deepspeed_modules()
    if deepspeed_modules is not None:
        # DeepSpeed's sharded linear layers are prepacked as well.
        LinearAllreduce, LinearLayer = deepspeed_modules
        mapping.update(
            {
                LinearLayer: _IPEXLinear,
                LinearAllreduce: _IPEXLinearAllreduce,
            }
        )
    return mapping
@functools.lru_cache(None)
def IPEX_GEMM_MODULE_CPU():
    """List the GEMM-style module classes IPEX recognizes on CPU (cached)."""
    gemm_modules = [torch.nn.Linear]
    deepspeed_modules = may_import_deepspeed_modules()
    if deepspeed_modules is not None:
        gemm_modules += list(deepspeed_modules)
    return gemm_modules
@functools.lru_cache(None)
def IPEX_WEIGHT_CONVERT_MODULE_CPU(inference: bool, dtype: torch.dtype):
    """Return the module classes whose weights ipex.optimize may cast to ``dtype``.

    Fixed: the original annotated ``dtype`` with the *value* ``torch.bfloat16``
    instead of the type ``torch.dtype``.

    Args:
        inference: True for the inference conversion list, False for training.
        dtype: target low-precision dtype (torch.bfloat16 or torch.float16).
    """
    # Imported here rather than at module top -- presumably to avoid an import
    # cycle with the lstm/merged-embedding modules (TODO confirm).
    from ._lstm_convert import _LSTM
    from intel_extension_for_pytorch.nn.modules import MergedEmbeddingBag

    module_convert_list_bf16_inference = [
        torch.nn.Conv2d,
        torch.nn.Conv3d,
        torch.nn.ConvTranspose2d,
        torch.nn.ConvTranspose3d,
        torch.nn.Linear,
        torch.nn.Embedding,
        torch.nn.LSTM,
    ]

    module_convert_list_bf16_training = [
        torch.nn.Conv1d,
        torch.nn.Conv2d,
        torch.nn.Conv3d,
        torch.nn.ConvTranspose2d,
        torch.nn.ConvTranspose3d,
        torch.nn.Linear,
        torch.nn.Embedding,
        torch.nn.LSTM,
        # TODO: why different with inference
        MergedEmbeddingBag,
        torch.nn.EmbeddingBag,
        _LSTM,
        torch.nn.ParameterList,
    ]

    # FP16 uses the same (smaller) list for both inference and training.
    module_convert_list_fp16 = [
        torch.nn.Conv1d,
        torch.nn.Conv2d,
        torch.nn.Conv3d,
        torch.nn.Linear,
    ]

    if dtype == torch.float16:
        return module_convert_list_fp16
    elif inference:
        return module_convert_list_bf16_inference
    else:
        return module_convert_list_bf16_training
def _should_prepack(module, is_training, is_xpu=False):
if is_xpu:
return False
# for training, if auto_kernel_selection(onednn) is off, IPEX won't prepack FP32 linear.
if (
isinstance(module, torch.nn.Linear)
and not _using_dnnl()
and is_training
and module.weight.dtype is torch.float
):
return False
if isinstance(module, torch.nn.ConvTranspose2d):
if module.padding[0] - module.output_padding[0] + module.stride[0] <= 0:
return False
if module.padding[1] - module.output_padding[1] + module.stride[1] <= 0:
return False
if isinstance(module, torch.nn.ConvTranspose3d):
if module.padding[0] - module.output_padding[0] + module.stride[0] <= 0:
return False
if module.padding[1] - module.output_padding[1] + module.stride[1] <= 0:
return False
if module.padding[2] - module.output_padding[2] + module.stride[2] <= 0:
return False
# Conv1d backward is not implemented, will not prepack.
if isinstance(module, torch.nn.Conv1d) and module.training:
return False
if module.weight.dtype == torch.half and module.__class__ in (
torch.nn.ConvTranspose2d,
torch.nn.ConvTranspose3d,
):
return False
if module.weight.dtype not in (
torch.float,
torch.float32,
torch.bfloat16,
torch.half,
):
return False
return True
def get_shared_parameter_status(module, shared_p):
    """Recursively record, for every parameter, which module classes share it.

    ``shared_p`` maps each ``nn.Parameter`` to a ``ParameterWrapper``. After the
    walk, each wrapper knows how many modules own the parameter directly
    (``num_modules``) and the union of module classes that touch any parameter
    of this module (``modules_cls``) -- so a parameter shared with a
    non-castable module blocks casting of its siblings too.
    """
    # Wrappers touched at this level; their modules_cls sets are unioned below.
    visited_wrapper = []
    for _, param in module._parameters.items():
        if param is None:
            continue
        if param not in shared_p:
            shared_p[param] = ParameterWrapper()
        shared_p[param].modules_cls.add(module.__class__)
        shared_p[param].num_modules += 1
        shared_p[param].parameter = param
        visited_wrapper.append(shared_p[param])
    # Special handle for nn.ParameterList since ParameterList is also a child module
    # Use the father's module class
    for _, sub_m in module.named_children():
        if isinstance(sub_m, torch.nn.ParameterList):
            for _, param in sub_m.named_parameters():
                if param is None:
                    continue
                if param not in shared_p:
                    shared_p[param] = ParameterWrapper()
                    shared_p[param].parameter = param
                # num_modules is deliberately not bumped here; the recursion
                # into the ParameterList child below counts the parameter.
                shared_p[param].modules_cls.add(module.__class__)
                visited_wrapper.append(shared_p[param])
    # If Linear's weight is shared by some module cannot be casted
    # Linear's bias should not be casted too
    union_set = set()
    for param_w in visited_wrapper:
        union_set = union_set | param_w.modules_cls
    for param_w in visited_wrapper:
        param_w.modules_cls = union_set
    del visited_wrapper
    for _, child in module._modules.items():
        get_shared_parameter_status(child, shared_p)
def remove_empty_tensor(out):
    """Drop the placeholder empty weight/bias entries from a state dict.

    Mutates ``out`` in place and returns it.
    """
    suffixes = (
        "_ipex_module_empty_weight_tensor",
        "_ipex_module_empty_bias_tensor",
    )
    stale = [key for key in out if key.endswith(suffixes)]
    for key in stale:
        del out[key]
    return out
def patch_state_dict(model, params_attr, mode):
    """Patch ``model.state_dict`` so parameters are serialized in their public
    (original-dtype / unpacked) form.

    ``mode`` selects which cast-back context manager each ParameterWrapper
    enters while the original ``state_dict`` runs: "inference", "training"
    or "prepack".
    """

    def cast_back_state_dict(self, *args, destination=None, prefix="", keep_vars=False):
        with torch.no_grad(), contextlib.ExitStack() as stack:
            # Temporarily restore every wrapped parameter; the contexts undo
            # the restoration when the state dict has been produced.
            for v in params_attr.values():
                if mode == "inference":
                    stack.enter_context(v.inference_cast_save())
                elif mode == "training":
                    stack.enter_context(v.training_cast_save())
                else:
                    assert mode == "prepack"
                    stack.enter_context(v.prepack_cast_save())
            out = self._original_state_dict(
                *args, destination=destination, prefix=prefix, keep_vars=keep_vars
            )
            # We don't save the _ipex_module_empty_weight_tensor or _ipex_module_empty_bias_tensor Parameter in the state dict
            out = remove_empty_tensor(out)
            return out

    if not hasattr(model, "_original_state_dict"):
        # Keep a handle on the unpatched implementation; only patch once.
        setattr(model, "_original_state_dict", model.state_dict)  # noqa: B010
    model.state_dict = types.MethodType(cast_back_state_dict, model)
class ParameterWrapper(object):
    """Bookkeeping for one ``torch.nn.Parameter`` through ``ipex.optimize``:
    dtype casting for inference/training (including the bf16 "split"
    master-weight scheme) and weight prepacking into oneDNN/MKL blocked
    formats, plus save/load helpers that expose the parameter in its public
    plain format."""

    def __init__(self):
        # Holding module class with the same Parameter
        # Used to inferring whether this Parameter can be cast or prepacked
        self.modules_cls: Set[type] = set()
        # We will only pack weight if there is only 1 module are using this Parameter
        self.num_modules: int = 0
        # Master parameter for low precision training (bf16/fp16)
        self.master_parameter: torch.nn.Parameter = None
        # Parameter in the module, for example, module.weight
        self.parameter: torch.nn.Parameter = None
        # Parameter trail for split optimization
        self.parameter_trail: torch.Tensor = None
        # The original dtype for Parameter
        self.original_dtype: torch.dtype = None
        # The casted dtype by ipex.optimize
        self.casted_dtype: torch.dtype = None
        # Whether using split optimization
        self.split: bool = None
        # Whether weight is channels last for Conv/Conv_transpose
        self.weight_channels_last: bool = None
        # op context for prepacked weight
        self.op_ctx = None
        # shape before weight prepack, need this to check
        # whether we should pack state in optimizers
        self.plain_format_shape: torch.Size = None

    def can_cast_inference(self, dtype):
        """Return True if this parameter may be cast to ``dtype`` for inference."""
        if self.casted_dtype is not None:
            # already casted
            assert dtype == self.casted_dtype
            return True
        ori_dtype = self.parameter.dtype
        if ori_dtype not in (torch.float, torch.float32, torch.bfloat16, torch.float16):
            warnings.warn(
                f"WARNING: Can't convert model's parameters dtype from {ori_dtype} to {dtype}"
            )
            return False
        # every module class sharing this parameter must support the cast
        module_cls = IPEX_WEIGHT_CONVERT_MODULE_CPU(True, dtype)
        return all(cls in module_cls for cls in self.modules_cls)

    def cast_for_inference(self, dtype):
        """Cast the in-module parameter to ``dtype``, remembering the original dtype."""
        if self.original_dtype is not None:
            # current parameter is casted
            return
        self.casted_dtype = dtype
        self.original_dtype = self.parameter.dtype
        casted_param = self.parameter.to(dtype)
        with torch.no_grad():
            self.parameter.data = casted_param

    def can_cast_training(self, dtype):
        """Return True if this fp32 parameter may be cast to ``dtype`` for training."""
        if self.casted_dtype is not None:
            # already casted
            assert dtype == self.casted_dtype
            return True
        ori_dtype = self.parameter.dtype
        # training cast only starts from fp32 (a master copy must be exact)
        if ori_dtype not in (
            torch.float,
            torch.float32,
        ):
            warnings.warn(
                f"WARNING: Can't convert model's parameters dtype from {ori_dtype} to {dtype}"
            )
            return False
        module_cls = IPEX_WEIGHT_CONVERT_MODULE_CPU(False, dtype)
        return all(cls in module_cls for cls in self.modules_cls)

    def cast_for_training(self, dtype, split):
        """Cast for training: either split fp32 into a bf16 top half plus a
        trail tensor (``split=True``), or keep a separate fp32 master copy."""
        if self.original_dtype is not None:
            # current parameter is casted
            return
        self.original_dtype = self.parameter.dtype
        self.casted_dtype = dtype
        self.split = split
        if split:
            assert (
                dtype == torch.bfloat16
            ), "master_weight_split is only support for bf16 now"
            top, self.parameter_trail = torch.ops.torch_ipex.split_float_bfloat16(
                self.parameter.data
            )
            with torch.no_grad():
                self.parameter.data = top
        else:
            # for non-split case, module use different parameter with optimizer
            self.master_parameter = self.parameter
            self.parameter = torch.nn.Parameter(
                self.master_parameter.data.to(dtype),
                requires_grad=self.master_parameter.requires_grad,
            )

    def inference_cast_save(self):
        """Context manager restoring the original dtype while saving (inference)."""
        @contextlib.contextmanager
        def ctx():
            if self.original_dtype is not None:
                self.parameter.data = self.parameter.to(self.original_dtype)
            try:
                yield
            finally:
                if self.original_dtype is not None:
                    self.parameter.data = self.parameter.to(self.casted_dtype)

        return ctx()

    def training_cast_save(self):
        """Context manager restoring the fp32 value while saving (training)."""
        @contextlib.contextmanager
        def ctx():
            self._training_cast_before_save()
            try:
                yield
            finally:
                self._training_cast_after_save()

        return ctx()

    def prepack_cast_save(self):
        """Context manager restoring plain format (and dtype) while saving."""
        @contextlib.contextmanager
        def ctx():
            self._cast_unpack_before_save()
            try:
                yield
            finally:
                self._cast_unpack_after_save()

        return ctx()

    def _inference_cast_before_save(self):
        if self.original_dtype is not None:
            self.parameter.data = self.parameter.to(self.original_dtype)

    def _inference_cast_after_save(self):
        if self.original_dtype is not None:
            self.parameter.data = self.parameter.to(self.casted_dtype)

    def _training_cast_before_save(self):
        """Reassemble the fp32 value (from split halves or master copy) for saving."""
        if self.original_dtype is None:
            return
        assert self.original_dtype in (
            torch.float,
            torch.float32,
        )
        if self.split:
            assert self.parameter_trail is not None
            fp32_param = torch.ops.torch_ipex.cat_bfloat16_float(
                self.parameter.data, self.parameter_trail
            )
            with torch.no_grad():
                self.parameter.data = fp32_param
        else:
            # will save parameter for non-split case
            with torch.no_grad():
                self.parameter.data = self.master_parameter.data

    def _training_cast_after_save(self):
        """Re-apply the training cast after the state dict has been produced."""
        if self.original_dtype is None:
            return
        if self.split:
            assert self.casted_dtype == torch.bfloat16
            top, self.parameter_trail = torch.ops.torch_ipex.split_float_bfloat16(
                self.parameter.data
            )
            with torch.no_grad():
                self.parameter.data = top
        else:
            self.parameter.data = self.master_parameter.data.to(self.casted_dtype)

    def _cast_unpack_before_save(self):
        """Undo cast and (if prepacked) convert the weight back to public plain format."""
        if self.split is not None:
            self._training_cast_before_save()
        elif self.original_dtype is not None:
            self.parameter.data = self.parameter.to(self.original_dtype)
        if self.op_ctx is None:
            return
        with torch.no_grad():
            if self.master_parameter is not None:
                self.parameter.data = self.op_ctx.to_public(self.master_parameter)
            else:
                self.parameter.data = self.op_ctx.to_public(self.parameter)

    def _cast_unpack_after_save(self):
        """Re-cast and re-pack after saving."""
        if self.split is not None:
            self._training_cast_after_save()
        elif self.original_dtype is not None:
            self.parameter.data = self.parameter.to(self.casted_dtype)
        if self.op_ctx is None:
            return
        with torch.no_grad():
            if self.master_parameter is None:
                self.parameter.data = self.op_ctx.pack(self.parameter)
                if self.parameter_trail is not None:
                    self.parameter_trail = self.op_ctx.pack(self.parameter_trail)

    def can_prepack(self, module, is_training):
        """Return True if this parameter's module is eligible for weight prepack."""
        # a parameter shared by multiple modules cannot be prepacked
        if self.num_modules != 1:
            return False
        return _should_prepack(module, is_training)

    def prepack(self, module, is_training):
        """Dispatch to the conv / conv-transpose / linear prepack routine for ``module``."""
        self.plain_format_shape = module.weight.shape
        if module.__class__ not in IPEX_WEIGHT_PREPACK_MODULE_CPU():
            raise ValueError(
                "Cannot prepack module with class {}".format(module.__class__)
            )
        target_module = IPEX_WEIGHT_PREPACK_MODULE_CPU()[module.__class__]
        if target_module in (
            _IPEXConv1d,
            _IPEXConv2d,
            _IPEXConv3d,
        ):
            self.conv_prepack(module)
        elif target_module in (
            _IPEXConvTranspose2d,
            _IPEXConvTranspose3d,
        ):
            self.conv_transpose_prepack(module)
        else:
            assert target_module in (
                _IPEXLinear,
                _IPEXLinearAllreduce,
            )
            self.linear_prepack(module, is_training)

    def pack_weight(self, use_dnnl=True):
        """Replace parameter data with the packed weight held by ``op_ctx``."""
        if not use_dnnl:
            # TODO: Haozhe, LinWei
            # weird case that cannot override ".data" for mkl here
            # The op_ctx seems not hold the original plain format weight
            self.parameter = self.op_ctx.get_weight()
        else:
            with torch.no_grad():
                self.parameter.data = self.op_ctx.get_weight()
        if self.master_parameter is not None:
            with torch.no_grad():
                self.master_parameter.data = self.op_ctx.pack(
                    self.master_parameter.data
                )
        if self.parameter_trail is not None:
            self.parameter_trail = self.op_ctx.pack(self.parameter_trail)

    def conv_prepack(self, module):
        """Create the convolution prepack context for ``module`` and pack its weight."""
        module.prepack_input_shape = (
            module.input_shape if hasattr(module, "input_shape") else []
        )
        module.weight_channels_last = module.weight.is_contiguous(
            memory_format=torch.channels_last
        ) or module.weight.is_contiguous(memory_format=torch.channels_last_3d)
        self.weight_channels_last = module.weight_channels_last
        module.weight_size = module.weight.size()
        # non-"zeros" padding modes are applied explicitly in forward via F.pad,
        # so the kernel itself runs with zero padding
        module._real_padding = (
            module.padding
            if module.padding_mode == "zeros"
            else tuple([0] * (len(module.weight_size) - 2))
        )
        self.op_ctx = torch.ops.ipex_prepack.convolution_prepack(
            module.weight,
            module.bias,
            module.stride,
            module._real_padding,
            module.dilation,
            module.groups,
            module.weight_channels_last,
            module.prepack_input_shape,
        )
        self.pack_weight()

    def conv_transpose_prepack(self, module):
        """Create the conv-transpose prepack context for ``module`` and pack its weight."""
        module.prepack_input_shape = (
            module.input_shape if hasattr(module, "input_shape") else []
        )
        module.weight_channels_last = module.weight.is_contiguous(
            memory_format=torch.channels_last
        ) or module.weight.is_contiguous(memory_format=torch.channels_last_3d)
        self.weight_channels_last = module.weight_channels_last
        module.weight_size = module.weight.size()
        module._real_padding = (
            module.padding
            if module.padding_mode == "zeros"
            else tuple([0] * (len(module.weight_size) - 2))
        )
        self.op_ctx = torch.ops.ipex_prepack.conv_transpose_prepack(
            module.weight,
            module.bias,
            module.stride,
            module.padding,
            module.output_padding,
            module.groups,
            module.dilation,
            module.weight_channels_last,
            module.prepack_input_shape,
        )
        self.pack_weight()

    def linear_prepack(self, module, is_training):
        """Create the linear prepack context (oneDNN or MKL SGEMM) and pack the weight."""
        if module.__class__ in IPEX_GEMM_MODULE_CPU():
            if module.weight.dtype == torch.half:
                use_dnnl = True
            elif (
                module.weight.dtype == torch.float32
                and not is_training
                and frontend.get_fp32_math_mode(device="cpu")
                == frontend.FP32MathMode.FP32
                and not _using_dnnl()
            ):
                # fp32 inference in strict FP32 math mode uses MKL SGEMM
                use_dnnl = False
            else:
                assert module.weight.dtype in [
                    torch.float32,
                    torch.bfloat16,
                ], "Only float, bf16 and fp16 are supported"
                use_dnnl = True
            module.use_dnnl = use_dnnl
        if not hasattr(module, "out_features"):
            setattr(module, "out_features", module.weight.shape[0])  # noqa: B010
        # prepare batch size
        module.batch_size_collapsed = None
        if hasattr(module, "input_shape"):
            module.batch_size_collapsed = 1
            for i in range(len(module.input_shape) - 1):
                module.batch_size_collapsed *= module.input_shape[i]
        # create linear op context
        if module.use_dnnl:
            self.op_ctx = torch.ops.ipex_prepack.linear_prepack(
                module.weight, module.bias, module.batch_size_collapsed
            )
        else:
            self.op_ctx = torch.ops.ipex_prepack.mkl_sgemm_prepack(
                module.weight, module.bias, module.batch_size_collapsed
            )
        self.pack_weight(use_dnnl)

    def load_cast_and_prepack(self, module, param):
        """Load a plain-format ``param`` from a state dict: cast it, prepack it
        into a fresh context, and update all held tensors."""
        # load from state dict
        if self.split is not None:
            if self.split:
                (
                    to_pack,
                    self.parameter_trail,
                ) = torch.ops.torch_ipex.split_float_bfloat16(param)
            else:
                to_pack = param.to(torch.bfloat16)
        elif self.casted_dtype is not None:
            to_pack = param.to(self.casted_dtype)
        else:
            to_pack = param
        if module.__class__ in IPEX_WEIGHT_PREPACK_MODULE_CPU():
            m_cls = IPEX_WEIGHT_PREPACK_MODULE_CPU()[module.__class__]
        else:
            m_cls = module.__class__
        if m_cls in (
            _IPEXConv1d,
            _IPEXConv2d,
            _IPEXConv3d,
        ):
            loaded_ctx = torch.ops.ipex_prepack.convolution_prepack(
                to_pack,
                module.bias,
                module.stride,
                module._real_padding,
                module.dilation,
                module.groups,
                module.weight_channels_last,
                module.prepack_input_shape,
            )
        elif m_cls in (
            _IPEXConvTranspose2d,
            _IPEXConvTranspose3d,
        ):
            loaded_ctx = torch.ops.ipex_prepack.conv_transpose_prepack(
                to_pack,
                module.bias,
                module.stride,
                module.padding,
                module.output_padding,
                module.groups,
                module.dilation,
                module.weight_channels_last,
                module.prepack_input_shape,
            )
        else:
            assert m_cls in (
                _IPEXLinear,
                _IPEXLinearAllreduce,
            )
            if module.use_dnnl:
                loaded_ctx = torch.ops.ipex_prepack.linear_prepack(
                    to_pack, module.bias, module.batch_size_collapsed
                )
            else:
                loaded_ctx = torch.ops.ipex_prepack.mkl_sgemm_prepack(
                    to_pack, module.bias, module.batch_size_collapsed
                )
        self.op_ctx.load_from_ctx(loaded_ctx)
        self.parameter.data = self.op_ctx.get_weight()
        if self.parameter_trail is not None:
            self.parameter_trail = self.op_ctx.pack(self.parameter_trail)
        if self.master_parameter is not None:
            self.master_parameter.data = self.op_ctx.pack(param)

    def load_cast(self, param):
        """Load a plain ``param`` from a state dict when no prepack context exists."""
        if self.split is not None:
            if self.split:
                (
                    self.parameter.data,
                    self.parameter_trail,
                ) = torch.ops.torch_ipex.split_float_bfloat16(param)
            else:
                self.parameter.data = param.to(self.casted_dtype)
                self.master_parameter.data = param
        elif self.casted_dtype is not None:
            self.parameter.data = param.to(self.casted_dtype)
        else:
            self.parameter.data = param

    def load(self, module, param):
        """Entry point used by patched ``_load_from_state_dict`` implementations."""
        # load from state dict
        if self.op_ctx is not None:
            self.load_cast_and_prepack(module, param)
        else:
            self.load_cast(param)
| 23,005 | 35.229921 | 126 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/nn/utils/_weight_cast.py | import torch
import sys
from intel_extension_for_pytorch.optim import _optimizer_utils
import types
from ._parameter_wrapper import get_shared_parameter_status, patch_state_dict
def weight_dtype_convert_with_ipex(
    model, optimizer, params_attr, master_weight_split, dtype=torch.bfloat16
):
    """Cast castable parameters of ``model`` to ``dtype`` for training.

    ``params_attr`` maps Parameter -> ParameterWrapper and is filled lazily.
    With ``master_weight_split`` the bf16 split scheme is used; otherwise a
    separate fp32 master copy is kept and the optimizer is patched to step on
    it. Returns ``(model, optimizer, params_attr)``.
    """
    assert dtype in [
        torch.bfloat16,
        torch.float16,
    ], "weight convert only support bf16 and fp16"
    if len(params_attr) == 0:
        get_shared_parameter_status(model, params_attr)

    def _load_from_state_dict(
        self,
        state_dict,
        prefix,
        local_metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
    ):
        # Route each parameter through its ParameterWrapper so fp32 values
        # from the checkpoint are cast/split on load.
        for name, para in self.named_parameters():
            if not hasattr(self, name):
                continue
            para_name = prefix + name
            with torch.no_grad():
                if para_name in state_dict:
                    fp32_param = state_dict[para_name]
                    param_wrapper = getattr(self, name + "_wrapper")
                    param_wrapper.load(self, fp32_param)

    def convert(module):
        if not hasattr(module, "master_weight_split"):
            setattr(module, "master_weight_split", master_weight_split)  # noqa: B010
        # replace weight/bias
        for name, param in module._parameters.items():
            if param is None:
                continue
            param_wrapper = params_attr[param]
            if param_wrapper.can_cast_training(dtype):
                param_wrapper.cast_for_training(dtype, master_weight_split)
                if not master_weight_split:
                    # non-split: the module holds the low-precision copy
                    with torch.no_grad():
                        setattr(
                            module,
                            name,
                            param_wrapper.parameter,
                        )
                setattr(module, name + "_wrapper", param_wrapper)
                module._load_from_state_dict = types.MethodType(
                    _load_from_state_dict, module
                )

    def isCLIPTextEmbeddings(module):
        # CLIPTextEmbeddings is skipped from conversion; detected lazily so
        # transformers is not a hard dependency.
        mod = "transformers.models.clip.modeling_clip"
        return (
            mod in sys.modules
            and hasattr(sys.modules[mod], "CLIPTextEmbeddings")
            and isinstance(module, sys.modules[mod].CLIPTextEmbeddings)
        )

    def convert_rec(module):
        for sub_module in module.children():
            convert_rec(sub_module)
        if not isCLIPTextEmbeddings(module):
            convert(module)

    convert_rec(model)
    patch_state_dict(model, params_attr, "training")
    if optimizer is not None:
        _optimizer_utils.patch_load_state_dict(optimizer)
        if not hasattr(optimizer, "params_attr"):
            setattr(optimizer, "params_attr", params_attr)  # noqa: B010
        if not master_weight_split:
            _optimizer_utils.patch_step_for_master_weight_training(optimizer)
            _optimizer_utils.patch_zero_grad_for_master_weight_training(optimizer)
    return model, optimizer, params_attr
| 3,137 | 34.659091 | 85 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/nn/utils/_weight_prepack.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
import os
import pkg_resources
from intel_extension_for_pytorch import optim
logger = logging.getLogger(__name__)
def may_import_deepspeed_modules():
    """Return deepspeed's ``(LinearAllreduce, LinearLayer)`` if deepspeed is
    installed, otherwise ``None``."""
    # import deepspeed in a global space will raise circular import error
    # intel-extension-for-deepspeed imports both IPEX and deepspeed
    try:
        from deepspeed.module_inject.layers import LinearAllreduce, LinearLayer
    except ImportError:
        return None
    return LinearAllreduce, LinearLayer
# Register a custom "deepspeed_comm::all_reduce" op (only when deepspeed is
# installed) so the allreduce can appear in TorchScript/traced graphs and be
# dispatched on CPU.
installed_pkg = {pkg.key for pkg in pkg_resources.working_set}
if "deepspeed" in installed_pkg:
    from deepspeed import comm

    def _all_reduce(self, reduceOp, tag, ranks, group_size):
        # reduceOp/tag/ranks/group_size are required by the op schema; the
        # deepspeed comm backend here only needs the tensor itself.
        comm.all_reduce(self, async_op=False)
        return self

    ds_comm = torch.library.Library("deepspeed_comm", "DEF")
    ds_comm.define(
        "all_reduce(Tensor self, str reduceOp, str tag, int[] ranks, int group_size) -> Tensor"
    )
    ds_comm_lib_cpu = torch.library.Library("deepspeed_comm", "IMPL", "CPU")
    ds_comm_lib_cpu.impl("all_reduce", _all_reduce)
def _ipex_module_load_from_state_dict_(self, state_dict, prefix):
w_name = prefix + "weight"
b_name = prefix + "bias"
loaded_weight = state_dict[w_name]
if b_name in state_dict:
loaded_bias = state_dict[b_name]
self.bias_wrapper.load(self, loaded_bias)
self.weight_wrapper.load(self, loaded_weight)
class _IPEXPrepackModule(nn.Module):
def _get_forward_weight(self):
return self.weight if self.training else self._ipex_module_empty_weight_tensor
def _get_forward_bias(self):
return self.bias if self.training else self._ipex_module_empty_bias_tensor
class _IPEXConvNd(_IPEXPrepackModule):
    """Shared base for prepacked 1/2/3-d convolutions; forward dispatches to
    the IPEX convolution kernel through the prepack context ``self.ctx``."""

    __constants__ = [
        "stride",
        "padding",
        "dilation",
        "groups",
        "out_channels",
        "kernel_size",
    ]

    def __init__(self):
        super(_IPEXConvNd, self).__init__()

    def _save_to_state_dict(self, destination, prefix, keep_vars):
        # keep_vars would serialize the packed (non-public) tensors
        assert (
            not keep_vars
        ), "can not using keep_vars true when to save _IPEXConvNd's parameters"
        super(_IPEXConvNd, self)._save_to_state_dict(destination, prefix, keep_vars)

    def _load_from_state_dict(
        self,
        state_dict,
        prefix,
        local_metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
    ):
        with torch.no_grad():
            _ipex_module_load_from_state_dict_(self, state_dict, prefix)

    def forward(self, x):
        # [ Note -- Fix the size of the saved TorchScript model ]
        # In inference case:
        # We pass empty tensors for weight and bias in forward to solve the size increase issue of the saved TorchScript model.
        # For runtime memory usage, we don't have concern to use real tensors since
        # self.weight and self.bias shares the storage with the tensors in self.ctx,
        # thus the runtime memory won't grow.
        # However, for the saved TorchScript model, after torch.jit.trace and torch.jit.freeze,
        # self.ctx (with weight and bias inside), self.weight and self.bias will all be converted to prim::Constant on the graph
        # and they will all be serialized which makes the saved model size grow.
        # For inference, we pass in empty tensors in the forward function for weight and bias,
        # since self.weight and self.bias are not used,
        # they won't be on the traced graph, thus won't be saved later.
        # In training case:
        # Since autograd requires that grad shape to match the input tensor shape in the forward func,
        # we can't use empty tensor here.
        if self.padding_mode != "zeros":
            # non-zeros padding is applied explicitly; the kernel itself runs
            # with self._real_padding (all zeros in this branch)
            return torch.ops.torch_ipex.convolution_forward(
                F.pad(x, self._reversed_padding_repeated_twice, mode=self.padding_mode),
                self._get_forward_weight(),
                self._get_forward_bias(),
                self.ctx.get_data_handle(),
                self.weight_size,
                self._real_padding,
                self.stride,
                self.dilation,
                self.weight_channels_last,
            )
        return torch.ops.torch_ipex.convolution_forward(
            x,
            self._get_forward_weight(),
            self._get_forward_bias(),
            self.ctx.get_data_handle(),
            self.weight_size,
            self._real_padding,
            self.stride,
            self.dilation,
            self.weight_channels_last,
        )
class _IPEXConv1d(_IPEXConvNd):
    """Prepacked replacement for ``torch.nn.Conv1d``."""

    def __init__(self):
        super().__init__()
class _IPEXConv2d(_IPEXConvNd):
    """Prepacked replacement for ``torch.nn.Conv2d``."""

    def __init__(self):
        super().__init__()
class _IPEXConv3d(_IPEXConvNd):
    """Prepacked replacement for ``torch.nn.Conv3d``."""

    def __init__(self):
        super().__init__()
class _IPEXLinear(_IPEXPrepackModule):
    """Prepacked replacement for ``torch.nn.Linear``; dispatches to oneDNN or
    MKL SGEMM depending on ``self.use_dnnl``."""

    def __init__(self):
        super(_IPEXLinear, self).__init__()

    def post_ipex_gemm(self, output):
        # Hook for subclasses (e.g. allreduce) to post-process the GEMM output.
        return output

    def forward(self, x):
        if self.use_dnnl:
            output = torch.ops.torch_ipex.ipex_linear(
                x,
                self._get_forward_weight(),
                self._get_forward_bias(),
                self.ctx.get_data_handle(),
                self.out_features,
            )
        else:
            output = torch.ops.torch_ipex.ipex_MKLSGEMM(
                x,
                self._get_forward_weight(),
                self._get_forward_bias(),
                self.ctx.get_data_handle(),
                self.out_features,
            )
        return self.post_ipex_gemm(output)

    def _save_to_state_dict(self, destination, prefix, keep_vars):
        # keep_vars would serialize the packed (non-public) tensors
        assert (
            not keep_vars
        ), "can not using keep_vars true when to save _IPEXLinear's parameters"
        super(_IPEXLinear, self)._save_to_state_dict(destination, prefix, keep_vars)

    def _load_from_state_dict(
        self,
        state_dict,
        prefix,
        local_metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
    ):
        with torch.no_grad():
            _ipex_module_load_from_state_dict_(self, state_dict, prefix)
class _IPEXLinearAllreduce(_IPEXLinear):
    """Linear for tensor-parallel inference: all-reduces the GEMM output
    across ranks, then adds the (deferred) bias once after the reduction."""

    def __init__(self):
        super(_IPEXLinearAllreduce, self).__init__()

    def post_ipex_gemm(self, output):
        if self.mp_group is not None:
            # NOTE(review): assumes WORLD_SIZE is set in the environment when
            # mp_group is not None -- TODO confirm against the deepspeed setup.
            torch.ops.deepspeed_comm.all_reduce(
                output,
                "sum",
                "",
                list(torch.arange(int(os.environ["WORLD_SIZE"]))),
                int(os.environ["WORLD_SIZE"]),
            )
        # bias was detached before prepack (module_bias) and is added only
        # after the allreduce so it is not summed once per rank
        if self.module_bias is not None:
            output += self.module_bias
        return output
class _IPEXConvTransposeNd(_IPEXPrepackModule):
    """Shared base for prepacked 2/3-d transposed convolutions."""

    __constants__ = [
        "stride",
        "padding",
        "dilation",
        "groups",
        "out_channels",
        "kernel_size",
        "output_padding",
    ]

    def __init__(self):
        super(_IPEXConvTransposeNd, self).__init__()

    def _save_to_state_dict(self, destination, prefix, keep_vars):
        # keep_vars would serialize the packed (non-public) tensors
        assert (
            not keep_vars
        ), "can not using keep_vars true when to save _IPEXConvTransposeNd's parameters"
        super(_IPEXConvTransposeNd, self)._save_to_state_dict(
            destination, prefix, keep_vars
        )

    def _load_from_state_dict(
        self,
        state_dict,
        prefix,
        local_metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
    ):
        with torch.no_grad():
            _ipex_module_load_from_state_dict_(self, state_dict, prefix)

    def forward(self, x):
        return torch.ops.torch_ipex.conv_transpose(
            x,
            self._get_forward_weight(),
            self._get_forward_bias(),
            self.ctx.get_data_handle(),
            self.weight_size,
            self.padding,
            self.output_padding,
            self.stride,
            self.dilation,
            self.groups,
            self.weight_channels_last,
        )
class _IPEXConvTranspose2d(_IPEXConvTransposeNd):
    """Prepacked replacement for ``torch.nn.ConvTranspose2d``."""

    def __init__(self):
        super().__init__()
class _IPEXConvTranspose3d(_IPEXConvTransposeNd):
    """Prepacked replacement for ``torch.nn.ConvTranspose3d``."""

    def __init__(self):
        super().__init__()
def is_with_hook_on_weight_or_bias(module):
    """Return True if any forward-pre/forward/backward hook on ``module``
    targets its ``weight`` or ``bias`` (such modules must not be prepacked,
    e.g. ``torch.nn.utils.weight_norm`` hooks carry a ``name`` attribute),
    otherwise False.
    """
    hook_dicts = (
        module._forward_pre_hooks,
        module._forward_hooks,
        module._backward_hooks,
    )
    for hooks in hook_dicts:
        if hooks is None:
            continue
        for hook in hooks.values():
            if getattr(hook, "name", None) in ("weight", "bias"):
                return True
    # Previously this fell off the end and implicitly returned None;
    # return an explicit False for the "no such hook" case.
    return False
def weight_prepack_with_ipex(model, optimizer, params_attr, device_type="cpu"):
    """Replace eligible Conv/Linear/ConvTranspose modules of ``model`` with
    their prepacked IPEX counterparts (CPU only).

    ``params_attr`` maps Parameter -> ParameterWrapper; optimizer state is
    repacked to match the blocked weight layout. Returns
    ``(model, optimizer, params_attr)``.
    """
    from ._parameter_wrapper import (
        patch_state_dict,
        get_shared_parameter_status,
        IPEX_WEIGHT_PREPACK_MODULE_CPU,
    )

    is_training = optimizer is not None
    if len(params_attr) == 0:
        get_shared_parameter_status(model, params_attr)

    def found_wrapper(parameter, params_attr):
        # Identity search: after a dtype cast the module's Parameter object may
        # no longer be the dict key, but wrappers still reference it.
        for _, v in params_attr.items():
            if parameter is v.parameter:
                return v
        return None

    def convert(m, optimizer, params_attr):
        # already packed for reentrancy test
        if m.__class__ in IPEX_WEIGHT_PREPACK_MODULE_CPU().values():
            return m
        # pre check module class
        if m.__class__ not in IPEX_WEIGHT_PREPACK_MODULE_CPU().keys():
            return m
        if not hasattr(m, "weight"):
            return m
        if m.weight is None:
            return m
        if is_with_hook_on_weight_or_bias(m):
            return m
        if hasattr(m, "bias") and m.bias is not None:
            if m.bias in params_attr:
                param_wrapper = params_attr[m.bias]
            else:
                assert (
                    m.bias.dtype in [torch.bfloat16, torch.half]
                    and not m.master_weight_split
                )
                param_wrapper = found_wrapper(m.bias, params_attr)
                assert param_wrapper is not None
            bias_wrapper = param_wrapper
        else:
            bias_wrapper = None
        if m.weight in params_attr:
            param_wrapper = params_attr[m.weight]
        else:
            assert (
                m.weight.dtype in [torch.bfloat16, torch.half]
                and not m.master_weight_split
            )
            param_wrapper = found_wrapper(m.weight, params_attr)
            assert param_wrapper is not None
        if param_wrapper.can_prepack(m, is_training):
            new_m = IPEX_WEIGHT_PREPACK_MODULE_CPU()[m.__class__]()
            # LinearAllreduce defers its bias until after the allreduce, so it
            # must be detached before prepacking and stored as module_bias.
            all_reduce_bias = m.bias
            if isinstance(new_m, _IPEXLinearAllreduce):
                m.bias = None
            param_wrapper.prepack(m, is_training)
            new_m.__dict__ = m.__dict__
            if isinstance(new_m, _IPEXLinearAllreduce):
                new_m.module_bias = all_reduce_bias
            new_m.ctx = param_wrapper.op_ctx
            setattr(new_m, "weight_wrapper", param_wrapper)  # noqa: B010
            setattr(new_m, "bias_wrapper", bias_wrapper)  # noqa: B010
            # optimizer state must follow the master copy when one exists
            optimizer_para = param_wrapper.parameter
            if param_wrapper.master_parameter is not None:
                optimizer_para = param_wrapper.master_parameter
            optim._optimizer_utils.pack_optimizer_states(
                optimizer, optimizer_para, param_wrapper
            )
            new_m.training = is_training
            # _ipex_module_empty_weight_tensor and _ipex_module_empty_bias_tensor
            # have to be a Parameter so that dynamo could convert it into FakeTensor
            # These empty tensors will only be used during inference but we'll set
            # it in both training and eval mode to support the use case of the below
            # workflow:
            # model.train() -> ipex.optimize(model) -> model.eval()
            new_m._ipex_module_empty_weight_tensor = torch.nn.Parameter(
                torch.Tensor().to(dtype=new_m.weight.dtype)
            )
            if new_m.bias is None:
                new_m.register_parameter("_ipex_module_empty_bias_tensor", None)
            else:
                new_m._ipex_module_empty_bias_tensor = torch.nn.Parameter(
                    torch.Tensor().to(dtype=new_m.bias.dtype)
                )
            return new_m
        else:
            return m

    def convert_rec(m, optimizer, params_attr):
        new_m = convert(m, optimizer, params_attr)
        for name, sub_m in m.named_children():
            setattr(new_m, name, convert_rec(sub_m, optimizer, params_attr)[0])
        return new_m, optimizer, params_attr

    # NOTE(review): only device_type == "cpu" is handled; any other value
    # falls through and returns None implicitly.
    if device_type == "cpu":
        opt_model, opt_optmizer, params_attr = convert_rec(
            model, optimizer, params_attr
        )
        patch_state_dict(opt_model, params_attr, "prepack")
        setattr(opt_model, "params_attr", params_attr)  # noqa: B010
        if opt_optmizer is not None:
            setattr(opt_optmizer, "params_attr", params_attr)  # noqa: B010
            optim._optimizer_utils.patch_load_state_dict(opt_optmizer)
            optim._optimizer_utils.patch_state_dict(opt_optmizer)
        return opt_model, opt_optmizer, params_attr
def record_input_shape_for_prepack(module, sample_input):
    """Run ``module`` once on ``sample_input`` (in eval mode) with forward
    pre-hooks that record each GEMM/conv submodule's input shape into
    ``self.input_shape`` as a prepack hint; restores train mode afterwards.
    The hooks stay registered, matching the original behavior.
    """

    def hook_function(self, input):
        # input for linear/conv/transpose conv received here will be Tuple[Tensor]
        # BUGFIX: the original compared the module *instance* against a list of
        # *classes* ("self in [...]"), which is always False, so the padded
        # shape was never recorded for non-"zeros" padding modes.
        if (
            type(self) in [torch.nn.Conv1d, torch.nn.Conv2d, torch.nn.Conv3d]
            and self.padding_mode != "zeros"
        ):
            # record the explicitly padded shape: that is what the prepacked
            # kernel will actually see (forward applies F.pad in this mode)
            self.input_shape = F.pad(
                input[0], self._reversed_padding_repeated_twice, mode=self.padding_mode
            ).shape
        else:
            self.input_shape = input[0].shape

    def register_hook_function(module):
        if type(module) in [
            torch.nn.Linear,
            torch.nn.Conv1d,
            torch.nn.Conv2d,
            torch.nn.Conv3d,
            torch.nn.ConvTranspose2d,
        ]:
            module.register_forward_pre_hook(hook_function)

    def register_hook_function_rec(module):
        register_hook_function(module)
        for child in module.children():
            register_hook_function_rec(child)

    module_is_train = module.training
    module.eval()
    register_hook_function_rec(module)
    module(*sample_input)
    if module_is_train:
        module.train()
| 14,977 | 33.671296 | 128 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/nn/utils/__init__.py | from intel_extension_for_pytorch.nn.utils import _weight_prepack
from intel_extension_for_pytorch.nn.utils import _lstm_convert
from . import _model_convert, _weight_cast
| 171 | 42 | 64 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/nn/utils/_lstm_convert.py | import torch
import copy
from torch.nn.utils.rnn import PackedSequence
class _LSTM(torch.nn.LSTM):
    """``torch.nn.LSTM`` subclass that routes non-packed inputs to the IPEX
    (oneDNN) LSTM kernel and falls back to PyTorch for PackedSequence."""

    # This is a solution to swap the lstm module with the ipex counterpart
    # and will upstream this operator to PyTorch when oneDNN support
    # bias and src_iter_c in bf16 in bf16 inference. Will keep this
    # for better support of blocked-format weight, e.g. for training.
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    # port from torch/nn/modules/rnn.py
    # replace the _VF.lstm with torch.ops.torch_ipex.lstm when the input is not PackedSequence
    def forward(self, input, hx=None):  # noqa: F811
        orig_input = input
        # xxx: isinstance check needs to be in conditional for TorchScript to compile
        if isinstance(orig_input, PackedSequence):
            # fallback to PyTorch LSTM since PackedSequence unsupported in oneDNN
            return super(_LSTM, self).forward(input, hx)
        else:
            batch_sizes = None
            max_batch_size = input.size(0) if self.batch_first else input.size(1)
            sorted_indices = None
            unsorted_indices = None
            if hx is None:
                # default hidden/cell states: zeros of the documented shape
                num_directions = 2 if self.bidirectional else 1
                real_hidden_size = (
                    self.proj_size if self.proj_size > 0 else self.hidden_size
                )
                h_zeros = torch.zeros(
                    self.num_layers * num_directions,
                    max_batch_size,
                    real_hidden_size,
                    dtype=input.dtype,
                    device=input.device,
                )
                c_zeros = torch.zeros(
                    self.num_layers * num_directions,
                    max_batch_size,
                    self.hidden_size,
                    dtype=input.dtype,
                    device=input.device,
                )
                hx = (h_zeros, c_zeros)
            else:
                # Each batch of the hidden state should match the input sequence that
                # the user believes he/she is passing in.
                hx = self.permute_hidden(hx, sorted_indices)
            self.check_forward_args(input, hx, batch_sizes)
            result = torch.ops.torch_ipex.ipex_lstm(
                input,
                hx,
                self._flat_weights,
                self.bias,
                self.num_layers,
                self.dropout,
                self.training,
                self.bidirectional,
                self.batch_first,
            )
            output = result[0]
            hidden = result[1:]
            return output, self.permute_hidden(hidden, unsorted_indices)
def replace_params_in_optimizer(optimizer, param_dict):
    """Swap parameters in ``optimizer`` according to ``param_dict`` (old -> new),
    migrating any per-parameter optimizer state to the replacement.

    No-op when ``optimizer`` is None.
    """
    if optimizer is None:
        return
    for group in optimizer.param_groups:
        params = group["params"]
        for idx in range(len(params)):
            old = params[idx]
            if old not in param_dict:
                continue
            new = param_dict[old]
            params[idx] = new
            if old in optimizer.state:
                optimizer.state[new] = optimizer.state.pop(old)
def replace_lstm_with_ipex_lstm(model, optimizer):
    """Recursively replace every ``torch.nn.LSTM`` child of ``model`` with an
    IPEX ``_LSTM``, remapping the corresponding optimizer parameters."""
    # replace lstm with ipex lstm during inference
    # does not support the case where model itself is torch.nn.LSTM
    for child_name, child in model.named_children():
        if isinstance(child, torch.nn.LSTM):
            assert hasattr(
                child, "weight_ih_l0"
            ), "torch.nn.LSTM should have weight_ih_l0"
            ipex_lstm = _LSTM(
                child.input_size,
                child.hidden_size,
                child.num_layers,
                child.bias,
                child.batch_first,
                child.dropout,
                child.bidirectional,
                child.proj_size,
                child.weight_ih_l0.device,
                child.weight_ih_l0.dtype,
            )
            # copy all state (including weights) from the original module
            ipex_lstm.__dict__ = copy.deepcopy(child.__dict__)
            setattr(model, child_name, ipex_lstm)
            # deepcopy duplicated the Parameters, so point the optimizer at
            # the new copies
            param_dict = {}
            original_params = dict(child.named_parameters())
            for name, para in ipex_lstm.named_parameters():
                param_dict.update({original_params[name]: para})
            replace_params_in_optimizer(optimizer, param_dict)
        else:
            replace_lstm_with_ipex_lstm(child, optimizer)
| 4,226 | 36.741071 | 94 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/nn/functional/_tensor_method.py | import warnings
import torch
from torch.overrides import has_torch_function_unary, handle_torch_function
def _numpy(x):
    """Replacement for ``Tensor.numpy`` that supports bfloat16 by converting
    to float32 first; in that case the returned array does NOT share memory
    with the tensor (hence the warning)."""
    if x.dtype == torch.bfloat16:
        warnings.warn(
            "calling in ipex numpy which is not share memory with torch tensor for bfloat16 input."
        )
        return torch._C._TensorBase.numpy(x.float())
    else:
        return torch._C._TensorBase.numpy(x)
# Fix https://github.com/pytorch/pytorch/issues/82764
def __format__(self: torch.Tensor, format_spec):
    """Format a 0-dim tensor through its Python scalar so specs like
    ``{:.2f}`` work; defer to __torch_function__ handling or plain object
    formatting otherwise."""
    if has_torch_function_unary(self):
        return handle_torch_function(
            torch.Tensor.__format__, (self,), self, format_spec
        )
    # meta tensors have no value to extract with .item()
    if self.dim() == 0 and not self.is_meta and issubclass(type(self), torch.Tensor):
        return self.item().__format__(format_spec)
    return object.__format__(self, format_spec)
# Monkey-patch torch.Tensor with the IPEX-aware implementations defined above.
torch.Tensor.numpy = _numpy
torch.Tensor.__format__ = __format__
| 905 | 30.241379 | 99 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/_inductor/compile_fx.py | import builtins
import contextlib
from typing import List, Optional, Union, Dict
from unittest.mock import patch
import torch
from .decomposition import get_decompositions
from .lowering import patch_lowering
@contextlib.contextmanager
def patch_codegen():
    """Context manager that redirects inductor's CPU scheduling backend to
    IPEX's ``IpexCppScheduling`` while the ``with`` block is active."""
    from torch._inductor.scheduler import Scheduler
    from .codegen.cpp import IpexCppScheduling

    def get_backend(scheduler, device):
        # TODO(jgong5): support xpu
        if device.type == "cpu":
            # replace any previously-created non-IPEX CPU backend
            if device not in scheduler.backends or not isinstance(
                scheduler.backends[device], IpexCppScheduling
            ):
                scheduler.backends[device] = IpexCppScheduling(scheduler)
        else:
            if device not in scheduler.backends:
                scheduler.backends[device] = scheduler.create_backend(device)
        return scheduler.backends[device]

    with patch.object(Scheduler, "get_backend", get_backend):
        yield
@contextlib.contextmanager
def patch_functions():
    """
    On-the-fly patch applied for the duration of a compilation:
    1. lowering registration
    2. codegen backends
    """
    with patch_lowering():
        with patch_codegen():
            yield
def compile_fx(
    model: torch.fx.GraphModule,
    example_inputs: List[torch.Tensor],
    mode: Union[str, None] = None,
    options: Optional[Dict[str, Union[str, builtins.int, builtins.bool]]] = None,
):
    """Compile ``model`` with Inductor while the IPEX lowering/codegen
    patches are active, using the merged decomposition table.

    NOTE(review): ``mode`` and ``options`` are accepted for interface
    compatibility but are currently not forwarded to Inductor — confirm
    whether that is intentional.
    """
    from torch._inductor.compile_fx import compile_fx as inductor_compile

    with patch_functions():
        decomps = get_decompositions()
        return inductor_compile(model, example_inputs, decompositions=decomps)
| 1,575 | 28.185185 | 81 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/_inductor/lowering.py | # Custom lowerings overriding those from PyTorch
import contextlib
import functools
from torch._inductor.lowering import ELEMENTWISE_TYPE_PROMOTION_KIND
lowering_overrides = {}
def _register_lowering(
aten_fn,
decomp_fn,
broadcast=False,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
convert_input_to_bool=False,
):
if not isinstance(aten_fn, (list, tuple)):
aten_fn = [aten_fn]
else:
aten_fn = list(aten_fn)
for fn in aten_fn:
lowering_overrides.update(
{fn: (decomp_fn, broadcast, type_promotion_kind, convert_input_to_bool)}
)
def register_lowering(
    aten_fn,
    broadcast=False,
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
    convert_input_to_bool=False,
):
    """Decorator-style front end for :func:`_register_lowering`; the returned
    callable takes the decomposition function as its single argument."""
    kwargs = dict(
        broadcast=broadcast,
        type_promotion_kind=type_promotion_kind,
        convert_input_to_bool=convert_input_to_bool,
    )
    return functools.partial(_register_lowering, aten_fn, **kwargs)
@contextlib.contextmanager
def patch_lowering():
    """Temporarily install the IPEX lowering overrides into Inductor's
    global lowering table, restoring the original table on exit.

    Bug fix: the previous implementation only rebound a *local* name
    (``lowerings = copy.copy(lowerings)`` / ``lowerings = old_lowerings``),
    so the module-level table in ``torch._inductor.lowering`` was never
    snapshotted nor restored and the overrides leaked permanently.  We now
    snapshot the real module-level dict and restore it in place — preserving
    object identity, since other modules hold references to the same dict —
    inside a ``finally`` block so an exception raised under the context
    manager still triggers the restore.
    """
    from torch._inductor import lowering as inductor_lowering

    saved = dict(inductor_lowering.lowerings)
    try:
        for fn, (
            decomp_fn,
            broadcast,
            type_promotion_kind,
            convert_input_to_bool,
        ) in lowering_overrides.items():
            inductor_lowering.register_lowering(
                fn,
                broadcast=broadcast,
                type_promotion_kind=type_promotion_kind,
                convert_input_to_bool=convert_input_to_bool,
            )(decomp_fn)
        yield
    finally:
        # Restore in place: keep the dict object other modules already hold.
        inductor_lowering.lowerings.clear()
        inductor_lowering.lowerings.update(saved)
| 1,638 | 24.609375 | 84 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/_inductor/dynamo_backends.py | from torch._dynamo import register_backend
from .compiler import compile
@register_backend
def ipex(model, inputs):
    # Registers "ipex" as a TorchDynamo / torch.compile backend name and
    # delegates the actual graph compilation to `.compiler.compile`.
    return compile(model, inputs)
| 151 | 20.714286 | 42 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/_inductor/compiler.py | import torch
from torch._subclasses import FakeTensor
from torch.utils._mode_utils import no_dispatch
import builtins
import warnings
from typing import Callable, Dict, Optional, Union, List
_compiler_backend = "torchscript"
def _get_compiler_backend():
return _compiler_backend
def _set_compiler_backend(backend="torchscript"):
global _compiler_backend
_compiler_backend = backend
def compile(
model: torch.fx.GraphModule,
example_inputs: List[torch.Tensor],
mode: Union[str, None] = None,
options: Optional[Dict[str, Union[str, builtins.int, builtins.bool]]] = None,
) -> Callable:
def defake(x):
if not isinstance(x, FakeTensor):
return x
if x._has_symbolic_sizes_strides:
size = [
s.node.shape_env.size_hint(s.node.expr)
if isinstance(s, torch.SymInt)
else s
for s in x.size()
]
stride = [
s.node.shape_env.size_hint(s.node.expr)
if isinstance(s, torch.SymInt)
else s
for s in x.stride()
]
else:
size = x.size()
stride = x.stride()
y = torch.empty_strided(
size,
stride,
dtype=x.dtype,
device=x.device,
requires_grad=x.requires_grad,
)
y.zero_()
return y
if _get_compiler_backend() == "inductor":
from .compile_fx import compile_fx
return compile_fx(model, example_inputs, mode, options)
try:
with no_dispatch():
real_inputs = list(map(defake, example_inputs))
with torch.no_grad():
traced_model = torch.jit.trace(model.eval(), real_inputs)
traced_model = torch.jit.freeze(traced_model)
return traced_model
except Exception:
warnings.warn("JIT trace failed during the IPEX compile process.")
return model
| 2,000 | 27.183099 | 81 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/_inductor/decomposition.py | import logging
import torch._decomp as decomp
log = logging.getLogger(__name__)

# Mapping of ATen op -> custom decomposition; layered on top of Inductor's
# default table by get_decompositions().
decomposition_overrides = {}


def register_decomposition(ops):
    """Decorator registering a custom decomposition for ``ops`` (a single
    callable op or an iterable of ops); duplicates are logged and overwritten."""
    op_list = [ops] if callable(ops) else ops
    for op in op_list:
        if op in decomposition_overrides:
            log.warning(f"duplicate decomp: {ops}")
    return decomp.register_decomposition(ops, decomposition_overrides)
# Add custom decompositions here with `register_decomposition` decorator
def get_decompositions():
    """Return Inductor's default decomposition table merged with the IPEX
    overrides (overrides win on conflicting keys)."""
    from torch._inductor.decomposition import select_decomp_table

    table = dict(select_decomp_table())
    table.update(decomposition_overrides)
    return table
| 591 | 25.909091 | 72 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/_inductor/codegen/cpp.py | from torch._inductor.codegen.cpp import CppScheduling
class IpexCppScheduling(CppScheduling):
    """IPEX CPU codegen backend, installed by ``patch_codegen`` as the
    Scheduler backend for CPU devices.  Currently behaves exactly like
    Inductor's ``CppScheduling``; IPEX-specific C++ codegen hooks go here."""

    def __init__(self, scheduler):
        super().__init__(scheduler)
| 167 | 23 | 53 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/xpu/single_card.py | import os
import tempfile
import torch
import torch.distributed as dist
import intel_extension_for_pytorch # noqa F401
import oneccl_bindings_for_pytorch # noqa F401
class single_card_dist:
    r"""DistributedDataParallel(DDP) scaling API for XPU devices on one card.
    This API wraps pytorch DDP related module, and provides a simple usage to
    enable DDP training of models based on XPU devices on one card.
    Note: This API only focus on XPU devices on one card currently. Devices on multi-cards will be supported further.
    Args:
        model: model to be parallelized
        train_dataset: dataset for training
    Example usage::
        Assuming that you have a model runs on single tile, you only need to make minor changes
        to enable the DDP training.
        Please follow these steps:
        1. Import this API:
            >>> try:
            >>>     from intel_extension_for_pytorch.xpu.single_card import single_card_dist
            >>> except ImportError:
            >>>     raise ImportError("oneccl_bindings_for_pytorch not available!")
        2. We recommend to use multi_process_spawn launcher in below, as a torch.multiprocessing wrapper.
            >>> single_card_dist.multi_process_spawn(main_worker, (args, )) # put arguments of main_worker into a tuple
        3. Usage of this API:
            >>> dist = single_card_dist(model, train_dataset)
            >>> local_rank, model, train_sampler = dist.rank, dist.model, dist.train_sampler
        4. Set in the model training:
            >>> for epoch in range ...
                train_sampler.set_epoch(epoch)
        5. Adjust the model where calls local_rank, model, train_sampler correspondingly
        e.g.,
            i). device: get the xpu information used in model training
            >>> xpu = "xpu:{}".format(local_rank)
            >>> print("DDP Use XPU: {} for training".format(xpu))
            ii). model: use the model wrapped by DDP in the following training
            iii). train_sampler: use the train_sampler to get the train_loader
            >>> train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size,
                                                           shuffle=(train_sampler is None),
                                                           num_workers=args.workers, pin_memory=True, sampler=train_sampler)
    """

    def __init__(self, model=None, train_dataset=None):
        self.model = model
        self.train_dataset = train_dataset
        # Initialize the process group with ccl backend.
        # RANK / WORLD_SIZE / INIT_FILE are set by multi_process_spawn below.
        rank = int(os.environ["RANK"])
        world_size = int(os.environ["WORLD_SIZE"])
        dist.init_process_group(
            backend="ccl",
            init_method=os.environ["INIT_FILE"],
            rank=rank,
            world_size=world_size,
        )
        self.rank = dist.get_rank()
        self.model = self.get_ddp_model()
        self.train_sampler = self.get_train_sampler()

    @staticmethod
    def multi_process_spawn(fn=None, args=None):
        """
        Use torch.multiprocessing to spawn N (world_size) processes.
        """
        with tempfile.NamedTemporaryFile() as file:
            # Rendezvous file for init_process_group's file:// init method.
            os.environ["INIT_FILE"] = "file://{}".format(file.name)
            proc = torch.multiprocessing.get_context("spawn").Process
            processes = []
            pid_to_pipe = {}
            # One process per tile/device on the card.
            world_size = len(torch.xpu.getDeviceIdListForCard())
            for rank in range(world_size):
                parent_conn, child_conn = torch.multiprocessing.Pipe()
                child_env = os.environ.copy()
                child_env["PMI_SIZE"] = str(world_size)
                child_env["PMI_RANK"] = str(rank)
                # NOTE(review): RANK/WORLD_SIZE are written into the parent's
                # os.environ in a loop and inherited at Process() creation —
                # this relies on spawn snapshotting the env per iteration.
                os.environ["RANK"] = child_env["PMI_RANK"]
                os.environ["WORLD_SIZE"] = child_env["PMI_SIZE"]
                process = proc(target=fn, name="process " + str(rank), args=args)
                process.start()
                print(f"Start process {rank} with pid {process.pid}")
                pid_to_pipe[process.pid] = parent_conn
                processes.append(process)
            for process in processes:
                process.join()
            # NOTE(review): only the exit code of the *last* joined process is
            # returned; failures in other ranks are not surfaced here.
            return process.exitcode

    # device set (local_rank)
    def get_localrank(self):
        """
        Returns the local rank of process.
        note: for 1 card 2 tiles, local_rank is the same as global rank
        """
        return self.rank

    # model
    def get_ddp_model(self):
        """
        Returns class:`~torch.nn.parallel.DistributedDataParallel`.
        Parallelizes the application of the given module.
        """
        if not self.model:
            print("Please input the model!")
            return None
        self.xpu_device = "xpu:{}".format(self.rank)
        torch.xpu.set_device(self.xpu_device)
        self.model.xpu(self.xpu_device)
        # note we set find_unused_parameters to True by default, to enable models (e.g. Bert) which have
        # parameters that don't receive gradients as part of this graph are preemptively marked as
        # being ready to be reduced. Note this may bring additional overhead.
        return torch.nn.parallel.DistributedDataParallel(
            self.model,
            device_ids=[self.xpu_device],
            output_device=self.xpu_device,
            find_unused_parameters=True,
        )

    # data sampler
    def get_train_sampler(self):
        """
        Returns class:`~torch.utils.data.DistributedSampler`.
        Will use no sampler if :obj:`test_dataset` is a :obj:`torch.utils.data.IterableDataset`, a sequential sampler.
        """
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            # IterableDataset cannot be index-sharded; caller gets no sampler.
            return None
        else:
            return torch.utils.data.distributed.DistributedSampler(self.train_dataset)
| 5,732 | 38.537931 | 119 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/xpu/streams.py | import ctypes
import intel_extension_for_pytorch
class Stream(intel_extension_for_pytorch._C._XPUStreamBase):
    r"""Wrapper around an XPU SYCL queue.

    Arguments:
        device (torch.device or int, optional): device on which to allocate
            the stream; the target device is made current while allocating.
        priority (int, optional): stream priority. Default: 0.
    """

    def __new__(cls, device=None, priority=0, **kwargs):
        # Allocate the underlying queue while `device` is the current device.
        with intel_extension_for_pytorch.xpu.device(device):
            return super(Stream, cls).__new__(cls, priority=priority, **kwargs)

    @property
    def _as_parameter_(self):
        # ctypes interop: expose the raw queue handle when passed to C code.
        return ctypes.c_void_p(self.sycl_queue)

    def __eq__(self, o):
        # Two Stream objects are equal iff the C base class says so;
        # anything that is not a Stream compares unequal.
        if isinstance(o, Stream):
            return super(Stream, self).__eq__(o)
        return False

    def __hash__(self):
        # Consistent with __eq__: identity is (queue handle, device).
        return hash((self.sycl_queue, self.device))

    def __repr__(self):
        return (
            "<intel_extension_for_pytorch.Stream device={0} sycl_queue={1:#x}>".format(
                self.device, self.sycl_queue
            )
        )

    def wait_event(self, event):
        r"""Makes all future work submitted to the stream wait for an event.
        Arguments:
            event (Event): an event to wait for.
        """
        event.wait(self)

    def wait_stream(self, stream):
        r"""Synchronizes with another stream.
        All future work submitted to this stream will wait until all kernels
        submitted to a given stream at the time of call complete.
        Arguments:
            stream (Stream): a stream to synchronize.
        .. note:: This function returns without waiting for currently enqueued
           kernels in :attr:`stream`: only future operations are affected.
        """
        self.wait_event(stream.record_event())

    def record_event(self, event=None):
        r"""Records an event.
        Arguments:
            event (Event, optional): event to record. If not given, a new one
                will be allocated.
        Returns:
            Recorded event.
        """
        if event is None:
            event = Event()
        event.record(self)
        return event

    def synchronize(self):
        r"""Wait for all the kernels in this stream to complete."""
        super(Stream, self).synchronize()
class Event(intel_extension_for_pytorch._C._XPUEventBase):
    r"""Wrapper around an XPU event, used to mark points in a :class:`Stream`
    for synchronization and timing."""

    def __new__(cls, **kwargs):
        return super(Event, cls).__new__(cls, **kwargs)

    def record(self, stream=None):
        r"""Records the event in a given stream.
        Uses ``intel_extension_for_pytorch.xpu.current_stream()`` if no stream is specified.
        """
        if stream is None:
            stream = intel_extension_for_pytorch.xpu.current_stream()
        super(Event, self).record(stream)

    def wait(self, stream=None):
        r"""Makes all future work submitted to the given stream wait for this
        event.
        Use ``intel_extension_for_pytorch.xpu.current_stream()`` if no stream is specified.
        """
        if stream is None:
            stream = intel_extension_for_pytorch.xpu.current_stream()
        super(Event, self).wait(stream)

    def query(self):
        r"""Checks if all work currently captured by event has completed.
        Returns:
            A boolean indicating if all work currently captured by event has
            completed.
        """
        return super(Event, self).query()

    def elapsed_time(self, end_event):
        r"""Returns the time elapsed in milliseconds after the event was
        recorded and before the end_event was recorded.
        """
        return super(Event, self).elapsed_time(end_event)

    def synchronize(self):
        r"""Waits for the event to complete.
        Waits until the completion of all work currently captured in this event.
        This prevents the CPU thread from proceeding until the event completes.
        """
        super(Event, self).synchronize()

    @property
    def _as_parameter_(self):
        # ctypes interop: expose the raw event handle when passed to C code.
        return ctypes.c_void_p(self.dpcpp_event)

    def __repr__(self):
        if self.dpcpp_event:
            return "<torch.xpu.Event {0:#x}>".format(self._as_parameter_.value)
        else:
            return "<torch.xpu.Event uninitialized>"
| 3,980 | 30.595238 | 92 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/xpu/memory.py | import collections
from typing import Any, Dict, Union
import intel_extension_for_pytorch
from torch.types import Device
from torch._utils import _get_device_index
def empty_cache() -> None:
    r"""Releases all unoccupied cached memory currently held by the caching
    allocator so that those can be used in other GPU application and visible in
    sysman toolkit.

    .. note::
        :func:`~torch.xpu.empty_cache` doesn't increase the amount of GPU
        memory available for PyTorch. However, it may help reduce fragmentation
        of GPU memory in certain cases. See :ref:`xpu-memory-management` for
        more details about GPU memory management.
    """
    # Delegates directly to the native caching-allocator binding.
    intel_extension_for_pytorch._C._emptyCache()
def memory_stats(device: Union[Device, int] = None) -> Dict[str, Any]:
    r"""Returns a flat, sorted dictionary of XPU memory allocator statistics
    for a given device.

    Each value is a non-negative integer.  Core keys have the form
    ``"<metric>.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``
    where ``<metric>`` is one of ``allocated``/``allocated_bytes``,
    ``segment``/``reserved_bytes``, ``active``/``active_bytes`` or
    ``inactive_split``/``inactive_split_bytes``; simple event counters such
    as ``"num_alloc_retries"`` and ``"num_ooms"`` are also included.

    Arguments:
        device (torch.device or int, optional): selected device. Uses the
            current device (:func:`~torch.xpu.current_device`) when ``None``.

    .. note::
        See :ref:`xpu-memory-management` for more details about GPU memory
        management.
    """
    flat = []

    def _flatten(prefix, node):
        # Depth-first walk turning the nested dict into dotted (key, value) pairs.
        if not isinstance(node, dict):
            flat.append((prefix, node))
            return
        sep = "." if prefix else ""
        for key, val in node.items():
            _flatten(prefix + sep + key, val)

    _flatten("", memory_stats_as_nested_dict(device=device))
    flat.sort()
    return collections.OrderedDict(flat)
def memory_stats_as_nested_dict(device: Union[Device, int] = None) -> Dict[str, Any]:
    r"""Returns the result of :func:`~torch.xpu.memory_stats` as a nested dictionary."""
    return intel_extension_for_pytorch._C._memoryStats(
        _get_device_index(device, optional=True)
    )
def reset_accumulated_memory_stats(device: Union[Device, int] = None) -> None:
    r"""Resets the "accumulated" (historical) stats tracked by the XPU memory
    allocator: the ``"allocated"`` and ``"freed"`` keys of every stat dict plus
    ``"num_alloc_retries"`` and ``"num_ooms"`` (see :func:`~torch.xpu.memory_stats`).

    Arguments:
        device (torch.device or int, optional): selected device. Uses the
            current device (:func:`~torch.xpu.current_device`) when ``None``.

    .. note::
        See :ref:`xpu-memory-management` for more details about GPU memory
        management.
    """
    return intel_extension_for_pytorch._C._resetAccumulatedMemoryStats(
        _get_device_index(device, optional=True)
    )
def reset_peak_memory_stats(device: Union[Device, int] = None) -> None:
    r"""Resets the "peak" stats tracked by the XPU memory allocator — the
    ``"peak"`` key of every individual stat dict (see :func:`~torch.xpu.memory_stats`).

    Arguments:
        device (torch.device or int, optional): selected device. Uses the
            current device (:func:`~torch.xpu.current_device`) when ``None``.

    .. note::
        See :ref:`xpu-memory-management` for more details about GPU memory
        management.
    """
    return intel_extension_for_pytorch._C._resetPeakMemoryStats(
        _get_device_index(device, optional=True)
    )
def memory_allocated(device: Union[Device, int] = None) -> int:
    r"""Returns the GPU memory currently occupied by tensors (in bytes) on the
    given device.

    Arguments:
        device (torch.device or int, optional): selected device. Uses the
            current device (:func:`~torch.xpu.current_device`) when ``None``.

    .. note::
        This is likely less than the amount shown in sysman toolkit since some
        unused memory can be held by the caching allocator and some context
        needs to be created on GPU. See :ref:`xpu-memory-management` for more
        details about GPU memory management.
    """
    stats = memory_stats(device=device)
    return stats["allocated_bytes.all.current"]
def max_memory_allocated(device: Union[Device, int] = None) -> int:
    r"""Returns the peak GPU memory occupied by tensors (in bytes) on the given
    device since the beginning of the program (or since the last call to
    :func:`~torch.xpu.reset_peak_memory_stats`, which makes it possible to
    measure the peak of each training iteration).

    Arguments:
        device (torch.device or int, optional): selected device. Uses the
            current device (:func:`~torch.xpu.current_device`) when ``None``.

    .. note::
        See :ref:`xpu-memory-management` for more details about GPU memory
        management.
    """
    stats = memory_stats(device=device)
    return stats["allocated_bytes.all.peak"]
def memory_reserved(device: Union[Device, int] = None) -> int:
    r"""Returns the GPU memory currently managed by the caching allocator
    (in bytes) on the given device.

    Arguments:
        device (torch.device or int, optional): selected device. Uses the
            current device (:func:`~torch.xpu.current_device`) when ``None``.

    .. note::
        See :ref:`xpu-memory-management` for more details about GPU memory
        management.
    """
    stats = memory_stats(device=device)
    return stats["reserved_bytes.all.current"]
def max_memory_reserved(device: Union[Device, int] = None) -> int:
    r"""Returns the peak GPU memory managed by the caching allocator (in bytes)
    on the given device since the beginning of the program (or since the last
    call to :func:`~torch.xpu.reset_peak_memory_stats`, which makes it possible
    to measure the peak cached amount of each training iteration).

    Arguments:
        device (torch.device or int, optional): selected device. Uses the
            current device (:func:`~torch.xpu.current_device`) when ``None``.

    .. note::
        See :ref:`xpu-memory-management` for more details about GPU memory
        management.
    """
    stats = memory_stats(device=device)
    return stats["reserved_bytes.all.peak"]
def memory_snapshot():
    r"""Returns a snapshot of the XPU memory allocator state across all devices.

    Interpreting the output of this function requires familiarity with the
    memory allocator internals.

    .. note::
        See :ref:`xpu-memory-management` for more details about GPU memory
        management.
    """
    # Delegates directly to the native allocator binding.
    return intel_extension_for_pytorch._C._memorySnapshot()
def memory_summary(device: Union[Device, int] = None, abbreviated: bool = False) -> str:
    r"""Returns a human-readable printout of the current memory allocator
    statistics for a given device.

    This can be useful to display periodically during training, or when
    handling out-of-memory exceptions.

    Arguments:
        device (torch.device or int, optional): selected device. Returns
            printout for the current device, given by :func:`~torch.xpu.current_device`,
            if :attr:`device` is ``None`` (default).
        abbreviated (bool, optional): whether to return an abbreviated summary
            (default: False).

    .. note::
        See :ref:`xpu-memory-management` for more details about GPU memory
        management.
    """
    device = _get_device_index(device, optional=True)
    stats = memory_stats(device=device)

    def _format_size(sz, pref_sz):
        # Pick the unit from pref_sz (the row's reference value) so all four
        # columns of one row share the same unit.
        prefixes = ["B  ", "KB", "MB", "GB", "TB", "PB"]
        prefix = prefixes[0]
        for new_prefix in prefixes[1:]:
            if pref_sz < 768 * 1024:
                break
            prefix = new_prefix
            sz //= 1024
            pref_sz /= 1024
        return "{:7d} {}".format(sz, prefix)

    def _format_count(cnt, pref_cnt):
        # Same idea as _format_size but with decimal (K/M) scaling.
        prefixes = [" ", "K", "M"]
        prefix = prefixes[0]
        for new_prefix in prefixes[1:]:
            if pref_cnt < 750 * 1000:
                break
            prefix = new_prefix
            cnt //= 1000
            pref_cnt /= 1000
        return "{:7d} {} ".format(cnt, prefix)

    # (stats key prefix, display name, formatter) for each summary row group.
    metrics_to_display = [
        ("allocated_bytes", "Allocated memory", _format_size),
        ("active_bytes", "Active memory", _format_size),
        ("reserved_bytes", "GPU reserved memory", _format_size),
        ("inactive_split_bytes", "Non-releasable memory", _format_size),
        ("allocation", "Allocations", _format_count),
        ("active", "Active allocs", _format_count),
        ("segment", "GPU reserved segments", _format_count),
        ("inactive_split", "Non-releasable allocs", _format_count),
    ]

    lines = []
    lines.append("=" * 75)
    lines.append(" {_:16} PyTorch XPU memory summary, device ID {device:<17d} ")
    lines.append("-" * 75)
    lines.append(
        "  {_:9} XPU OOMs: {num_ooms:<12d} | {_:6} xpuMalloc retries: {num_alloc_retries:<8d}  "
    )
    lines.append("=" * 75)
    lines.append(
        "        Metric         | Cur Usage  | Peak Usage | Tot Alloc  | Tot Freed  "
    )
    for metric_key, metric_name, formatter in metrics_to_display:
        lines.append("-" * 75)
        submetrics = [("all", metric_name)]
        if not abbreviated:
            submetrics.append(("large_pool", "      from large pool"))
            submetrics.append(("small_pool", "      from small pool"))
        current_prefval, peak_prefval, allocated_prefval, freed_prefval = (
            None,
            None,
            None,
            None,
        )
        for submetric_key, submetric_name in submetrics:
            prefix = metric_key + "." + submetric_key + "."
            current = stats[prefix + "current"]
            peak = stats[prefix + "peak"]
            allocated = stats[prefix + "allocated"]
            freed = stats[prefix + "freed"]
            # The "all" row (processed first) fixes the unit used by the
            # large_pool / small_pool rows beneath it.
            if current_prefval is None:
                current_prefval = current
                peak_prefval = peak
                allocated_prefval = allocated
                freed_prefval = freed
            lines.append(
                " {:<21} | {} | {} | {} | {} ".format(
                    submetric_name,
                    formatter(current, current_prefval),
                    formatter(peak, peak_prefval),
                    formatter(allocated, allocated_prefval),
                    formatter(freed, freed_prefval),
                ),
            )
    lines.append("=" * 75)

    # Dots in stat keys clash with str.format's attribute syntax, so they are
    # rewritten to dashes before formatting the assembled template.
    fmt_dict = {"_": "", "device": device}
    for k, v in stats.items():
        fmt_dict[k.replace(".", "-")] = v
    return "|" + "|\n|".join(lines).format(**fmt_dict) + "|\n"
| 13,274 | 37.367052 | 96 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/xpu/utils.py | # coding: utf-8
import torch
from .. import _C
from enum import Enum
import warnings
from .. import frontend
import intel_extension_for_pytorch # noqa
def from_usm(src, dtype, shape, stride=None, device_id: int = -1) -> torch.Tensor:
    """from_usm(src, dtype, shape, stride=None, device_id=-1) -> Tensor

    Converts a tensor allocated in USM(United Shared Memory) into a ``torch.Tensor``.

    The returned PyTorch tensor will share the memory with the input tensor
    (which may have come from another library). Note that in-place operations
    will therefore also affect the data of the input tensor. And this API doesn't
    manage USM tensor src's lifetime. Please take care it carefully.

    Args:
        src: A capsule of USM pointer to convert, the name stored in the capsule
            is 'USMtensor'.
        dtype: the desired data type of returned tensor.
        shape: the desired shape of returned tensor.
        stride: the desired stride of returned tensor. Default: if None,
            returned tensor is contiguous.
        device_id: the root device id where the USM pointer is allocated. Default: -1,
            if the user is not sure.

    Warning: This is deprecated. Please use torch.from_dlpack instead.
    """
    # Fixed misspelling "decrepated" -> "deprecated" in the user-facing message
    # (and "device_d" -> "device_id" in the signature line of the docstring).
    warnings.warn("from_usm is deprecated. Please use torch.from_dlpack instead.")
    return _C._from_usm(src, dtype, shape, stride, device_id)
def to_usm(src: torch.Tensor):
    """to_usm(src: torch.Tensor): -> PyCapsule

    Converts a torch tensor allocated in USM(United Shared Memory) into a ``PyCapsule``,
    which encapsules a USM data pointer address.

    Args:
        src: a torch tensor.

    Warning: This is deprecated. Please use torch.to_dlpack instead.
    """
    # Fixed misspelling "decrepated" -> "deprecated" in the user-facing message.
    warnings.warn("to_usm is deprecated. Please use torch.to_dlpack instead.")
    return _C._to_usm(src)
def is_contiguous_channels_last_1d(input):
    """Return True when the 3-D tensor ``input`` (N, C, L) is laid out
    channels-last-1d, i.e. contiguous in (N, L, C) memory order.

    Implemented by viewing (N, C, L) as a 4-D (N, C, 1, L) tensor and asking
    whether that view is ``torch.channels_last`` contiguous.  Non-3-D input
    is never channels-last-1d.
    """
    if input.dim() != 3:
        return False
    as_4d = input.view(input.size(0), input.size(1), 1, input.size(2))
    return as_4d.is_contiguous(memory_format=torch.channels_last)
def has_onemkl():
    # True when IPEX was built with oneMKL support.
    return _C._is_onemkl_enabled()
def has_multi_context():
    # True when multi-context support is enabled in the native library.
    return _C._is_multi_context_enabled()
def has_channels_last_1d():
    # True when the channels-last-1d memory format is enabled.
    return _C._is_channels_last_1d_enabled()
def has_fp64_dtype(device: int = -1) -> bool:
    r"""Returns a bool indicating if the current XPU device supports dtype float64"""
    return _C._has_fp64_dtype(device)
def has_2d_block_array(device: int = -1) -> bool:
    r"""Returns a bool indicating if the platform supports 2d block array load/store"""
    return _C._has_2d_block_array(device)
# Basic OnOff
class OnOff:
    """Context manager that turns a feature on for the duration of a ``with``
    block and back off afterwards — but only when the feature was off when
    the guard was constructed; an already-on feature is left untouched.
    """

    def __init__(self, checker, enable, disable):
        # Snapshot the state at construction time.  The original code compared
        # against False with ==, so we preserve that exact semantics (checker
        # may return a non-bool from the native layer).
        self._started_off = checker() == False  # noqa: E712
        self._turn_on = enable
        self._turn_off = disable

    def __enter__(self):
        if self._started_off:
            self._turn_on()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self._started_off:
            self._turn_off()
        return False
class EnumBase(Enum):
    """Enum base class with helpers to coerce user-supplied values (members,
    ints, or decimal strings) and to bridge to native getter/setter functions.
    """

    @classmethod
    def convert(cls, value):
        """Coerce ``value`` to a member of ``cls``; raise RuntimeError otherwise."""
        if isinstance(value, cls):
            return value
        candidate = value
        if isinstance(candidate, str) and candidate.isdecimal():
            candidate = int(candidate)
        if isinstance(candidate, int) and cls.has_value(candidate):
            return cls(candidate)
        raise RuntimeError("Unexpected {} value {}!".format(cls, value))

    @classmethod
    def has_value(cls, value):
        """True when ``value`` is the underlying value of some member."""
        return value in cls._value2member_map_

    @classmethod
    def get_value(cls, get_func):
        """Call ``get_func`` and wrap its raw result as a member."""
        return cls(get_func())

    @classmethod
    def set_value(cls, set_func, value):
        """Coerce ``value`` and pass its raw value to ``set_func``."""
        return set_func(cls.convert(value).value)
# Verbose Level
class VerbLevel(EnumBase):
    # Global IPEX verbosity switch (see EnumBase for accepted input forms).
    OFF = 0
    ON = 1
def get_verbose_level():
    # Current verbosity as a VerbLevel member.
    return VerbLevel.get_value(_C._get_verbose_level)
def set_verbose_level(level):
    # `level` may be a VerbLevel, an int, or a decimal string.
    VerbLevel.set_value(_C._set_verbose_level, level)
# oneDNN Verbose
class OnednnVerbLevel(EnumBase):
    # oneDNN primitive verbose levels: ON logs execution, ON_DETAIL adds
    # primitive-creation information.
    OFF = 0
    ON = 1
    ON_DETAIL = 2
def set_onednn_verbose(level):
    # `level` may be an OnednnVerbLevel, an int, or a decimal string.
    st = OnednnVerbLevel.set_value(_C._set_onednn_verbose, level)
    assert bool(st), "WARNING: Failed to turn on oneDNN verbose!"
class onednn_verbose(object):
    """Context manager enabling oneDNN verbose output for a scope; always
    turns verbosity OFF again on exit."""
    def __init__(self, level):
        self.level = OnednnVerbLevel.convert(level)
    def __enter__(self):
        if self.level != OnednnVerbLevel.OFF:
            set_onednn_verbose(self.level)
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        set_onednn_verbose(OnednnVerbLevel.OFF)
        return False
# oneMKL Verbose
class OnemklVerbLevel(EnumBase):
    # oneMKL verbose levels: ON logs calls, ON_SYNC additionally synchronizes
    # for accurate timing.
    OFF = 0
    ON = 1
    ON_SYNC = 2
def set_onemkl_verbose(level):
    # `level` may be an OnemklVerbLevel, an int, or a decimal string.
    st = OnemklVerbLevel.set_value(_C._set_onemkl_verbose, level)
    assert bool(st), "WARNING: Failed to turn on oneMKL verbose!"
class onemkl_verbose(object):
    """Context manager enabling oneMKL verbose output for a scope; always
    turns verbosity OFF again on exit."""
    def __init__(self, level):
        self.level = OnemklVerbLevel.convert(level)
    def __enter__(self):
        if self.level != OnemklVerbLevel.OFF:
            set_onemkl_verbose(self.level)
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        set_onemkl_verbose(OnemklVerbLevel.OFF)
        return False
def optimize(
    model,
    dtype=None,
    optimizer=None,
    level="O1",
    inplace=False,
    conv_bn_folding=None,
    linear_bn_folding=None,
    weights_prepack=None,
    replace_dropout_with_identity=None,
    optimize_lstm=None,
    split_master_weight_for_bf16=None,
    fuse_update_step=None,
    auto_kernel_selection=None,
    sample_input=None,
    graph_mode=None,
):
    r"""
    torch.xpu.optimize is an alternative of the optimize API in Intel® Extension
    for PyTorch*, providing identical usage for XPU devices only.  It exists to
    unify the coding style in user scripts based on the torch.xpu module.

    See the main ``intel_extension_for_pytorch.optimize`` documentation for the
    full argument description.

    Args (XPU-specific defaults):
        inplace (bool): default False, to save valuable XPU device memory.
        weights_prepack (bool): disabled for XPU devices.
        sample_input (tuple or torch.Tensor): disabled for XPU devices.

    Examples:
        >>> # bfloat16 inference case.
        >>> model = ...
        >>> model.load_state_dict(torch.load(PATH))
        >>> model.eval()
        >>> optimized_model = torch.xpu.optimize(model, dtype=torch.bfloat16)
        >>> # bfloat16 training case.
        >>> optimizer = ...
        >>> model.train()
        >>> optimized_model, optimized_optimizer = torch.xpu.optimize(model, dtype=torch.bfloat16, optimizer=optimizer)
    """
    # Forward everything positionally to the shared frontend implementation.
    args = (
        model,
        dtype,
        optimizer,
        level,
        inplace,
        conv_bn_folding,
        linear_bn_folding,
        weights_prepack,
        replace_dropout_with_identity,
        optimize_lstm,
        split_master_weight_for_bf16,
        fuse_update_step,
        auto_kernel_selection,
        sample_input,
        graph_mode,
    )
    return frontend.optimize(*args)
class FP32MathMode(EnumBase):
    # Math mode used for FP32 computation on XPU: full FP32 precision, or the
    # faster reduced-precision TF32 / BF32 modes.
    FP32 = intel_extension_for_pytorch._C.XPUFP32MathMode.FP32
    TF32 = intel_extension_for_pytorch._C.XPUFP32MathMode.TF32
    BF32 = intel_extension_for_pytorch._C.XPUFP32MathMode.BF32
def get_fp32_math_mode():
    # Current FP32 math mode as an FP32MathMode member.
    return FP32MathMode.get_value(intel_extension_for_pytorch._C._get_fp32_math_mode)
def set_fp32_math_mode(mode):
    # `mode` may be an FP32MathMode member, its int value, or a decimal string.
    st = FP32MathMode.set_value(
        intel_extension_for_pytorch._C._set_fp32_math_mode, mode
    )
    assert bool(st), "WARNING: Failed to set FP32 math mode!"
class fp32_math_mode(object):
    """Context manager temporarily switching the XPU FP32 math mode."""
    def __init__(self, mode):
        self.mode = FP32MathMode.convert(mode)
    def __enter__(self):
        current_math_mode = get_fp32_math_mode()
        if self.mode != current_math_mode:
            set_fp32_math_mode(self.mode)
            # Stash the *previous* mode in self.mode so __exit__ restores it.
            self.mode = current_math_mode
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Restore the stashed mode (a no-op re-set when nothing changed).
        set_fp32_math_mode(self.mode)
        return False
# Sync Execution Mode
def using_sync_mode():
    # True when synchronous execution mode is enabled in the native runtime.
    return _C._is_sync_mode()
def enable_sync_mode():
    # Enable synchronous execution mode in the native runtime.
    _C._enable_sync_mode()
def disable_sync_mode():
    # Disable synchronous execution mode in the native runtime.
    _C._disable_sync_mode()
class sync_mode(OnOff):
    # Context-manager wrapper (via OnOff) wiring the sync-mode query/enable/
    # disable functions together; exact enter/exit semantics live in OnOff.
    def __init__(self):
        super().__init__(using_sync_mode, enable_sync_mode, disable_sync_mode)
# Tile Partition As Device
def using_tile_as_device():
    # True when each tile is exposed as a separate XPU device.
    return _C._is_tile_as_device_enabled()
# Only work before lazy init
def enable_tile_as_device():
    # Expose each tile as a separate device; only effective before lazy init.
    _C._enable_tile_as_device()
# Only work before lazy init
def disable_tile_as_device():
    # Stop exposing tiles as separate devices; only effective before lazy init.
    _C._disable_tile_as_device()
################################################################
# EXPERIMENTAL options:
# NOTE: Below options are under experimental.
# They are instable, and may be removed without notice!
################################################################
def has_jit_quantization_save():
    # Experimental: whether saving of JIT quantization state is enabled.
    return _C._is_jit_quantization_save_enabled()
# oneDNN Layout
def using_onednn_layout():
    # True when the oneDNN (blocked) tensor layout is globally enabled.
    return _C._is_onednn_layout_enabled()
def is_onednn_layout(tensor):
    # Ask the torch_ipex op whether this particular tensor is stored in the
    # oneDNN blocked layout.
    return torch.ops.torch_ipex.check_onednn_layout(tensor)
def enable_onednn_layout():
    # Globally enable the oneDNN blocked layout (experimental).
    _C._enable_onednn_layout()
def disable_onednn_layout():
    # Globally disable the oneDNN blocked layout (experimental).
    _C._disable_onednn_layout()
class onednn_layout(OnOff):
    # Context-manager wrapper (via OnOff) toggling the global oneDNN layout
    # setting for the duration of the ``with`` block.
    def __init__(self):
        super().__init__(
            using_onednn_layout, enable_onednn_layout, disable_onednn_layout
        )
# For several primitive implementations, force to set compute engine
class XPUComputeEng(EnumBase):
    # Compute-engine choices for primitive implementations, mirroring the
    # native intel_extension_for_pytorch._C.XPUComputeEng values.
    RECOMMEND = intel_extension_for_pytorch._C.XPUComputeEng.RECOMMEND
    BASIC = intel_extension_for_pytorch._C.XPUComputeEng.BASIC
    ONEDNN = intel_extension_for_pytorch._C.XPUComputeEng.ONEDNN
    ONEMKL = intel_extension_for_pytorch._C.XPUComputeEng.ONEMKL
    XETLA = intel_extension_for_pytorch._C.XPUComputeEng.XETLA
def get_compute_eng():
    # Return the currently selected compute engine as an XPUComputeEng member.
    return XPUComputeEng.get_value(intel_extension_for_pytorch._C._get_compute_eng)
def set_compute_eng(eng):
    # Select the compute engine; ``eng`` is validated by XPUComputeEng.set_value.
    st = XPUComputeEng.set_value(intel_extension_for_pytorch._C._set_compute_eng, eng)
    # NOTE(review): ``assert`` disappears under ``python -O`` — failures would
    # then pass silently; consider an explicit raise.
    assert bool(st), "WARNING: Failed to set XPU compute engine!"
class compute_eng(object):
    """Context manager that temporarily selects an XPU compute engine.

    On entry the requested engine is applied (only when it differs from the
    current one) and the previous engine is remembered; on exit the previous
    engine is restored.
    """

    def __init__(self, eng):
        self.eng = XPUComputeEng.convert(eng)

    def __enter__(self):
        previous = get_compute_eng()
        if previous != self.eng:
            set_compute_eng(self.eng)
        # Remember what to restore on exit.
        self.eng = previous
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        set_compute_eng(self.eng)
        return False
# Simple Trace
def using_simple_trace():
    # True when simple-trace debugging output is enabled (experimental).
    return _C._is_simple_trace_enabled()
def enable_simple_trace():
    # Enable simple-trace debugging output (experimental).
    _C._enable_simple_trace()
def disable_simple_trace():
    # Disable simple-trace debugging output (experimental).
    _C._disable_simple_trace()
class simple_trace(OnOff):
    # Context-manager wrapper (via OnOff) toggling simple-trace output for
    # the duration of the ``with`` block.
    def __init__(self):
        super().__init__(using_simple_trace, enable_simple_trace, disable_simple_trace)
| 11,158 | 26.35049 | 119 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/xpu/launch.py | import platform
import subprocess
import os
import sys
import logging
from tempfile import mkstemp
import uuid
from argparse import ArgumentParser, REMAINDER
from argparse import RawTextHelpFormatter
# Module-wide logging: timestamped records, INFO level by default.
format_str = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
logging.basicConfig(level=logging.INFO, format=format_str)
logger = logging.getLogger(__name__)
def apply_monkey_patch(args):
    """Create a temporary copy of ``args.program`` with IPEX auto-enabled.

    The copy is written next to the original script and starts with
    ``import torch`` / ``import intel_extension_for_pytorch as ipex`` (plus an
    optional fp64->fp32 dtype override), followed by the original script text
    unchanged.

    Args:
        args: parsed CLI namespace; reads ``args.program`` (script path) and
            ``args.convert_fp64_to_fp32`` (bool).

    Returns:
        str: path of the generated temporary script. The caller is
        responsible for deleting it.
    """
    program = args.program
    with open(program) as f:
        original_program_lines = f.readlines()
    # Prepend the imports (and optional dtype override) before the user code.
    monkey_patch = """import torch
import intel_extension_for_pytorch as ipex
"""
    if args.convert_fp64_to_fp32:
        monkey_patch += """ipex.xpu.overrides.convert_default_dtype(torch.float64, torch.float32, True)
"""
    original_program_lines.insert(0, monkey_patch)
    program_absolute_path = os.path.abspath(program)
    program_absolute_path_dir = os.path.dirname(program_absolute_path)
    # Unique suffix so concurrent launches of the same script never collide.
    generate_file_suffix = (
        str(hash(program_absolute_path)) + str(uuid.uuid1()) + "_auto_ipex"
    )
    fd, generate_file = mkstemp(
        suffix=generate_file_suffix, dir=program_absolute_path_dir, text=True
    )
    # Write through the descriptor returned by mkstemp; the original code
    # discarded it, leaking one file descriptor per launch.
    with os.fdopen(fd, "w") as f:
        f.writelines(original_program_lines)
    return generate_file
class Launcher:
    r"""
    Base class for launcher
    """

    def __init__(self):
        pass

    def launch(self, args):
        # To be overridden by concrete launchers.
        pass

    def logger_env(self, env_name=""):
        # Log the current value of ``env_name`` when it is set.
        if env_name in os.environ:
            logger.info("{}={}".format(env_name, os.environ[env_name]))

    def set_env(self, env_name, env_value=None):
        # Bug fix: assigning None into os.environ raises TypeError; the
        # original warned but then crashed when the variable was unset.
        # Warn and leave the environment untouched instead.
        if env_value is None:
            logger.warning("{} is None".format(env_name))
            return
        if env_name not in os.environ:
            os.environ[env_name] = env_value
        elif os.environ[env_name] != env_value:
            # Respect a pre-existing user setting but surface the conflict.
            logger.warning(
                "{} in environment variable is {} while the value you set is {}".format(
                    env_name, os.environ[env_name], env_value
                )
            )
        self.logger_env(env_name)
class XPUDefaultLauncher(Launcher):
    """
    Run the program using XPU.
    # Note: For now, we only support single instance in this script
    """

    def launch(self, args):
        # Build and run a single child process:
        #   <current python> <ipex-patched copy of args.program> <args.program_args...>
        processes = []
        cmd = []
        monkey_program = apply_monkey_patch(args)
        cmd.append(sys.executable)
        cmd.append(monkey_program)
        cmd.extend(args.program_args)
        # NOTE(review): plain " ".join breaks for paths/arguments containing
        # spaces; shlex quoting would be safer — confirm before changing.
        cmd_s = " ".join(cmd)
        process = subprocess.Popen(cmd_s, env=os.environ, shell=True)
        processes.append(process)
        try:
            for process in processes:
                process.wait()
                if process.returncode != 0:
                    raise subprocess.CalledProcessError(
                        returncode=process.returncode, cmd=cmd_s
                    )
        except subprocess.CalledProcessError as e:
            # Best-effort reporting; e.output is None here because stdout was
            # not captured by Popen.
            print(e.output)
        finally:
            # Always remove the generated temporary script.
            os.remove(monkey_program)
def init_parser(parser):
    """
    Register the launcher's command-line options on ``parser``.

    Adds the optional ``--convert-fp64-to-fp32`` flag, the positional
    ``program`` (script path) and ``program_args`` (everything after the
    script, forwarded to it verbatim).

    @retval ArgumentParser
    """
    parser.add_argument(
        "--convert-fp64-to-fp32",
        "--convert_fp64_to_fp32",
        action="store_true",
        dest="convert_fp64_to_fp32",
        help="To automatically convert torch.float64(double) dtype to torch.float32",
    )
    parser.add_argument(
        "program",
        type=str,
        # Typo fix in user-visible help text: "proram" -> "program".
        help="The full path to the program/script to be launched. "
        "followed by all the arguments for the script",
    )
    # rest from the training program
    parser.add_argument("program_args", nargs=REMAINDER)
    return parser
def run_main_with_args(args):
    """Launch ``args.program`` through the XPU launcher (Linux only).

    Raises:
        RuntimeError: when executed on Windows.
    """
    env_before = set(os.environ.keys())
    if platform.system() == "Windows":
        raise RuntimeError("Windows platform is not supported!!!")
    # Dead-store fix: the original assigned ``launcher = None`` and then
    # immediately overwrote it.
    launcher = XPUDefaultLauncher()
    launcher.launch(args)
    # Report any environment variables added during the launch.
    for x in sorted(set(os.environ.keys()) - env_before):
        logger.debug("{0}={1}".format(x, os.environ[x]))
def main():
    """CLI entry point: build the parser, parse argv and run the launcher."""
    description = (
        "This is a script for launching PyTorch training and inference on Intel GPU Series"
        "with optimal configurations. "
        "\n################################# Basic usage ############################# \n"
        "\n 1. Run with args\n"
        "\n >>> ipexrun xpu python_script args \n"
        "\n############################################################################# \n"
    )
    parser = init_parser(
        ArgumentParser(description=description, formatter_class=RawTextHelpFormatter)
    )
    run_main_with_args(parser.parse_args())
# Allow this module to be executed directly as a script.
if __name__ == "__main__":
    main()
| 4,854 | 28.603659 | 103 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/xpu/random.py | import torch
from typing import cast, Iterable, List, Union
from torch import Tensor
from .lazy_init import _lazy_init, _lazy_call
import contextlib
from typing import Generator
import warnings
# Public API of this module.
__all__ = [
    "get_rng_state",
    "get_rng_state_all",
    "set_rng_state",
    "set_rng_state_all",
    "manual_seed",
    "manual_seed_all",
    "seed",
    "seed_all",
    "initial_seed",
    "fork_rng",
]
def get_rng_state(device: Union[int, str, torch.device] = "xpu") -> Tensor:
    r"""Returns the random number generator state of the specified GPU as a ByteTensor.

    Args:
        device (torch.device or int, optional): The device to return the RNG state of.
            Default: ``'xpu'`` (i.e., ``torch.device('xpu')``, the current XPU device).

    .. warning::
        This function eagerly initializes XPU.
    """
    _lazy_init()
    # Normalize the argument into a torch.device.
    if isinstance(device, int):
        device = torch.device("xpu", device)
    elif isinstance(device, str):
        device = torch.device(device)
    index = device.index
    if index is None:
        index = torch.xpu.current_device()
    return torch.xpu.default_generators[index].get_state()
def get_rng_state_all() -> List[Tensor]:
    r"""Returns a list of ByteTensor representing the random number states of all devices."""
    # Comprehension instead of the manual append loop: same order, one RNG
    # state per visible XPU device.
    return [get_rng_state(i) for i in range(torch.xpu.device_count())]
def set_rng_state(
    new_state: Tensor, device: Union[int, str, torch.device] = "xpu"
) -> None:
    r"""Sets the random number generator state of the specified GPU.

    Args:
        new_state (torch.ByteTensor): The desired state
        device (torch.device or int, optional): The device to set the RNG state.
            Default: ``'xpu'`` (i.e., ``torch.device('xpu')``, the current XPU device).
    """
    # Clone eagerly so later mutations of the caller's tensor cannot leak
    # into the deferred callback below.
    state_copy = new_state.clone(memory_format=torch.contiguous_format)
    if isinstance(device, int):
        device = torch.device("xpu", device)
    elif isinstance(device, str):
        device = torch.device(device)

    def _apply():
        index = cast(torch.device, device).index
        if index is None:
            index = torch.xpu.current_device()
        torch.xpu.default_generators[index].set_state(state_copy)

    # Deferred until XPU is (lazily) initialized.
    _lazy_call(_apply)
def set_rng_state_all(new_states: Iterable[Tensor]) -> None:
    r"""Sets the random number generator state of all devices.

    Args:
        new_states (Iterable of torch.ByteTensor): The desired state for each device"""
    # The i-th state is applied to device index i.
    for i, state in enumerate(new_states):
        set_rng_state(state, i)
def manual_seed(seed: int) -> None:
    r"""Sets the seed for generating random numbers for the current GPU.
    It's safe to call this function if XPU is not available; in that
    case, it is silently ignored.

    Args:
        seed (int): The desired seed.

    .. warning::
        If you are working with a multi-GPU model, this function is insufficient
        to get determinism. To seed all GPUs, use :func:`manual_seed_all`.
    """
    seed = int(seed)

    def _apply():
        device_index = torch.xpu.current_device()
        torch.xpu.default_generators[device_index].manual_seed(seed)

    # Deferred until XPU is (lazily) initialized.
    _lazy_call(_apply)
def manual_seed_all(seed: int) -> None:
    r"""Sets the seed for generating random numbers on all GPUs.
    It's safe to call this function if XPU is not available; in that
    case, it is silently ignored.

    Args:
        seed (int): The desired seed.
    """
    seed = int(seed)

    def _apply():
        for device_index in range(torch.xpu.device_count()):
            torch.xpu.default_generators[device_index].manual_seed(seed)

    # seed_all=True marks this deferred callback as a whole-machine seeding.
    _lazy_call(_apply, seed_all=True)
def seed() -> None:
    r"""Sets the seed for generating random numbers to a random number for the current GPU.
    It's safe to call this function if XPU is not available; in that
    case, it is silently ignored.

    .. warning::
        If you are working with a multi-GPU model, this function will only initialize
        the seed on one GPU. To initialize all GPUs, use :func:`seed_all`.
    """

    def _apply():
        device_index = torch.xpu.current_device()
        torch.xpu.default_generators[device_index].seed()

    # Deferred until XPU is (lazily) initialized.
    _lazy_call(_apply)
def seed_all() -> None:
    r"""Sets the seed for generating random numbers to a random number on all GPUs.
    It's safe to call this function if XPU is not available; in that
    case, it is silently ignored.
    """

    def _apply():
        # Draw one random seed on the first device, then copy it to the rest
        # so every device ends up seeded identically.
        shared_seed = 0
        for device_index in range(torch.xpu.device_count()):
            generator = torch.xpu.default_generators[device_index]
            if device_index == 0:
                generator.seed()
                shared_seed = generator.initial_seed()
            else:
                generator.manual_seed(shared_seed)

    _lazy_call(_apply)
def initial_seed() -> int:
    r"""Returns the current random seed of the current GPU.

    .. warning::
        This function eagerly initializes XPU.
    """
    # current_device() performs the lazy initialization.
    generator = torch.xpu.default_generators[torch.xpu.current_device()]
    return generator.initial_seed()
# Module-level guard so fork_rng's multi-device warning fires only once.
_fork_rng_warned_already = False
@contextlib.contextmanager
def fork_rng(
    devices=None, enabled=True, _caller="fork_rng", _devices_kw="devices"
) -> Generator:
    """
    Forks the RNG, so that when you return, the RNG is reset
    to the state that it was previously in.

    Args:
        devices (iterable of XPU IDs): XPU devices for which to fork
            the RNG. CPU RNG state is always forked. By default, :meth:`fork_rng` operates
            on all devices, but will emit a warning if your machine has a lot
            of devices, since this function will run very slowly in that case.
            If you explicitly specify devices, this warning will be suppressed
        enabled (bool): if ``False``, the RNG is not forked. This is a convenience
            argument for easily disabling the context manager without having
            to delete it and unindent your Python code under it.
    """
    global _fork_rng_warned_already
    # Internal arguments:
    # _caller: the function which called fork_rng, which the user used
    # _devices_kw: the devices keyword of _caller
    if not enabled:
        yield
        return
    if devices is None:
        num_devices = torch.xpu.device_count()
        # Warn once per process: forking every device can be slow.
        if num_devices > 1 and not _fork_rng_warned_already:
            warnings.warn(
                (
                    "XPU reports that you have {num_devices} available devices, and you "
                    "have used {caller} without explicitly specifying which devices are being used. "
                    "For safety, we initialize *every* XPU device by default, which "
                    "can be quite slow if you have a lot of GPUs. If you know that you are only "
                    "making use of a few XPU devices, set the environment variable XPU_VISIBLE_DEVICES "
                    "or the '{devices_kw}' keyword argument of {caller} with the set of devices "
                    "you are actually using. For example, if you are using CPU only, "
                    "set XPU_VISIBLE_DEVICES= or devices=[]; if you are using "
                    "GPU 0 only, set XPU_VISIBLE_DEVICES=0 or devices=[0]. To initialize "
                    "all devices and suppress this warning, set the '{devices_kw}' keyword argument "
                    "to `range(torch.xpu.device_count())`."
                ).format(
                    num_devices=num_devices, caller=_caller, devices_kw=_devices_kw
                )
            )
            _fork_rng_warned_already = True
        devices = list(range(num_devices))
    else:
        # Protect against user passing us a generator; we need to traverse this
        # multiple times but a generator will be exhausted upon first traversal
        devices = list(devices)
    # Snapshot CPU and per-device XPU RNG states before running the body.
    cpu_rng_state = torch.get_rng_state()
    gpu_rng_states = []
    for device in devices:
        gpu_rng_states.append(torch.xpu.get_rng_state(device))
    try:
        yield
    finally:
        # Restore all snapshots even if the body raised.
        torch.set_rng_state(cpu_rng_state)
        for device, gpu_rng_state in zip(devices, gpu_rng_states):
            torch.xpu.set_rng_state(gpu_rng_state, device)
| 8,518 | 32.671937 | 104 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/xpu/cpp_extension.py | import copy
import importlib
import os
import setuptools
import subprocess
import shutil
import re
import shlex
import sys
import sysconfig
import errno
import warnings
import torch
from torch.utils.cpp_extension import _TORCH_PATH
from torch.utils.file_baton import FileBaton
from torch.utils._cpp_extension_versioner import ExtensionVersioner
from torch.utils.hipify.hipify_python import GeneratedFileCleaner
from typing import List, Optional, Union, Tuple
from torch.torch_version import TorchVersion
from setuptools.command.build_ext import build_ext
# Platform detection flags.
IS_WINDOWS = sys.platform == "win32"
IS_MACOS = sys.platform.startswith("darwin")
IS_LINUX = sys.platform.startswith("linux")
# Platform-dependent file-name conventions for built artifacts.
LIB_EXT = ".pyd" if IS_WINDOWS else ".so"
EXEC_EXT = ".exe" if IS_WINDOWS else ""
CLIB_PREFIX = "" if IS_WINDOWS else "lib"
CLIB_EXT = ".dll" if IS_WINDOWS else ".so"
SHARED_FLAG = "/DLL" if IS_WINDOWS else "-shared"
# Minimum compiler versions considered ABI-compatible with PyTorch binaries.
MINIMUM_GCC_VERSION = (5, 0, 0)
MINIMUM_MSVC_VERSION = (19, 0, 24215)
# Warning-suppression and runtime flags always passed to MSVC.
COMMON_MSVC_FLAGS = [
    "/MD",
    "/wd4819",
    "/wd4251",
    "/wd4244",
    "/wd4267",
    "/wd4275",
    "/wd4018",
    "/wd4190",
    "/EHsc",
]
# Flags always passed to the DPC++ compiler.
COMMON_DPCPP_FLAGS = ["-fPIC"]
TORCH_LIB_PATH = os.path.join(_TORCH_PATH, "lib")
# Shared versioner used to invalidate cached JIT extension builds.
JIT_EXTENSION_VERSIONER = ExtensionVersioner()
# Taken directly from python stdlib < 3.9
# See https://github.com/pytorch/pytorch/issues/48617
def _nt_quote_args(args: Optional[List[str]]) -> List[str]:
"""Quote command-line arguments for DOS/Windows conventions.
Just wraps every argument which contains blanks in double quotes, and
returns a new argument list.
"""
# Cover None-type
if not args:
return []
return [f'"{arg}"' if " " in arg else arg for arg in args]
def get_default_build_root() -> str:
    r"""
    Returns the path to the root folder under which extensions will built.

    For each extension module built, there will be one folder underneath the
    folder returned by this function. For example, if ``p`` is the path
    returned by this function and ``ext`` the name of an extension, the build
    folder for the extension will be ``p/ext``.

    This directory is **user-specific** so that multiple users on the same
    machine won't meet permission issues.
    """
    # user_cache_dir keeps builds per-user (e.g. ~/.cache/torch_extensions).
    return os.path.realpath(torch._appdirs.user_cache_dir(appname="torch_extensions"))
def _get_exec_path(module_name, path):
    # On Windows, make sure torch's DLL directory is on PATH before the built
    # executable runs; a no-op on other platforms.
    if IS_WINDOWS and TORCH_LIB_PATH not in os.getenv("PATH", "").split(";"):
        # PATH entries may be aliases of the same directory; compare resolved paths.
        torch_lib_in_path = any(
            os.path.exists(p) and os.path.samefile(p, TORCH_LIB_PATH)
            for p in os.getenv("PATH", "").split(";")
        )
        if not torch_lib_in_path:
            os.environ["PATH"] = f"{TORCH_LIB_PATH};{os.getenv('PATH', '')}"
    return os.path.join(path, f"{module_name}{EXEC_EXT}")
def get_dpcpp_complier():
    """Return the DPC++ (C++) compiler executable to use.

    The ``CXX`` environment variable, when set, takes precedence; otherwise
    ``icpx`` is looked up on PATH.

    Raises:
        RuntimeError: if ``CXX`` is unset and ``icpx`` cannot be found.
    """
    # Honor the CXX override first: the original code raised when icpx was
    # missing from PATH even though a usable CXX had been provided.
    dpcpp_cmp = os.getenv("CXX")
    if dpcpp_cmp is None:
        dpcpp_cmp = shutil.which("icpx")
    if dpcpp_cmp is None:
        raise RuntimeError("Failed to find compiler path from OS PATH")
    return dpcpp_cmp
def get_icx_complier():
    """Return the C compiler (``icx``) executable to use.

    The ``CC`` environment variable, when set, takes precedence; otherwise
    ``icx`` is looked up on PATH.

    Raises:
        RuntimeError: if ``CC`` is unset and ``icx`` cannot be found.
    """
    # Bug fix: the original assigned the CC override to an unrelated local
    # (``dpcpp_cmp``) and then returned ``icx_cmp``, so CC was silently ignored.
    icx_cmp = os.getenv("CC")
    if icx_cmp is None:
        icx_cmp = shutil.which("icx")
    if icx_cmp is None:
        raise RuntimeError("Failed to find compiler path from OS PATH")
    return icx_cmp
def is_ninja_available():
    r"""
    Returns ``True`` if the `ninja <https://ninja-build.org/>`_ build system is
    available on the system, ``False`` otherwise.
    """
    try:
        # Probing "ninja --version" doubles as an executable-works check.
        subprocess.check_output(["ninja", "--version"])
    except Exception:
        return False
    return True
def verify_ninja_availability():
    r"""
    Raises ``RuntimeError`` if `ninja <https://ninja-build.org/>`_ build system is not
    available on the system, does nothing otherwise.
    """
    if is_ninja_available():
        return
    raise RuntimeError("Ninja is required to load C++ extensions")
def _is_cpp_file(path: str) -> bool:
valid_ext = [".cpp", ".hpp"]
return os.path.splitext(path)[1] in valid_ext
def _is_c_file(path: str) -> bool:
valid_ext = [".c", ".h"]
return os.path.splitext(path)[1] in valid_ext
class DpcppBuildExtension(build_ext, object):
    r"""
    A custom :mod:`setuptools` build extension .

    This :class:`setuptools.build_ext` subclass takes care of passing the
    minimum required compiler flags (e.g. ``-std=c++17``) as well as DPCPP
    compilation.

    When using :class:`DpcppBuildExtension`, it is allowed to supply a dictionary
    for ``extra_compile_args`` (rather than the usual list) that maps from
    languages (``cxx``) to a list of additional compiler flags to supply to the
    compiler.

    ``use_ninja`` (bool): If ``use_ninja`` is ``True`` (default), then we
    attempt to build using the Ninja backend. Ninja greatly speeds up
    compilation compared to the standard ``setuptools.build_ext``.
    Fallbacks to the standard distutils backend if Ninja is not available.

    ``no_python_abi_suffix`` (bool): If ``no_python_abi_suffix`` is ``False`` (default),
    then we attempt to build module with python abi suffix, example:
    output module name: module_name.cpython-37m-x86_64-linux-gnu.so, the
    ``cpython-37m-x86_64-linux-gnu`` is append python abi suffix.

    .. note::
        By default, the Ninja backend uses #CPUS + 2 workers to build the
        extension. This may use up too many resources on some systems. One
        can control the number of workers by setting the `MAX_JOBS` environment
        variable to a non-negative number.
    """
    @classmethod
    def with_options(cls, **options):
        r"""
        Returns a subclass with alternative constructor that extends any original keyword
        arguments to the original constructor with the given options.
        """
        class cls_with_options(cls):  # type: ignore[misc, valid-type]
            def __init__(self, *args, **kwargs):
                kwargs.update(options)
                super().__init__(*args, **kwargs)
        return cls_with_options
    def __init__(self, *args, **kwargs) -> None:
        super(DpcppBuildExtension, self).__init__(*args, **kwargs)
        self.no_python_abi_suffix = kwargs.get("no_python_abi_suffix", False)
        self.use_ninja = kwargs.get("use_ninja", True)
        if self.use_ninja:
            # Test if we can use ninja. Fallback otherwise.
            msg = (
                "Attempted to use ninja as the BuildExtension backend but "
                "{}. Falling back to using the slow distutils backend."
            )
            if not is_ninja_available():
                warnings.warn(msg.format("we could not find ninja."))
                self.use_ninja = False
    def finalize_options(self) -> None:
        super().finalize_options()
        if self.use_ninja:
            # Ninja does its own incremental tracking; force a full pass so
            # distutils doesn't skip "up-to-date" outputs.
            self.force = True
    def build_extensions(self) -> None:
        # NOTE(review): ``dpcpp_ext`` is never set True, so this while loop
        # only exhausts the iterator and has no observable effect — looks
        # like dead code left over from an earlier version.
        dpcpp_ext = False
        extension_iter = iter(self.extensions)
        extension = next(extension_iter, None)
        while not dpcpp_ext and extension:
            extension = next(extension_iter, None)
        for extension in self.extensions:
            # Ensure at least an empty list of flags for 'cxx' when
            # extra_compile_args is a dict. Otherwise, default torch
            # flags do not get passed. Necessary when only one of 'cxx' is
            # passed to extra_compile_args in DPCPPExtension, i.e.
            # DPCPPExtension(..., extra_compile_args={'cxx': [...]})
            if isinstance(extension.extra_compile_args, dict):
                for ext in ["cxx"]:
                    if ext not in extension.extra_compile_args:
                        extension.extra_compile_args[ext] = []
            self._add_compile_flag(extension, "-DTORCH_API_INCLUDE_EXTENSION_H")
            # See note [Pybind11 ABI constants]
            for name in ["COMPILER_TYPE", "STDLIB", "BUILD_ABI"]:
                val = getattr(torch._C, f"_PYBIND11_{name}")
                if val is not None and not IS_WINDOWS:
                    self._add_compile_flag(extension, f'-DPYBIND11_{name}="{val}"')
            self._define_torch_extension_name(extension)
            self._add_gnu_cpp_abi_flag(extension)
        # Save the original _compile method for later.
        if self.compiler.compiler_type == "msvc":
            original_compile = self.compiler.compile
            original_spawn = self.compiler.spawn
        else:
            original_compile = self.compiler._compile
            # save origin function for passthough
            original_link_shared_object = self.compiler.link_shared_object
            original_spawn = self.compiler.spawn
        def append_std17_if_no_std_present(cflags) -> None:
            # Add -std=c++17 (or /std:c++17 for MSVC) unless the caller
            # already chose a standard.
            cpp_format_prefix = (
                "/{}:" if self.compiler.compiler_type == "msvc" else "-{}="
            )
            cpp_flag_prefix = cpp_format_prefix.format("std")
            cpp_flag = cpp_flag_prefix + "c++17"
            if not any(flag.startswith(cpp_flag_prefix) for flag in cflags):
                cflags.append(cpp_flag)
        def unix_dpcpp_flags(cflags):
            # Prepend the always-on DPC++ flags (-fPIC).
            cflags = COMMON_DPCPP_FLAGS + cflags
            return cflags
        def convert_to_absolute_paths_inplace(paths):
            # Helper function. See Note [Absolute include_dirs]
            if paths is not None:
                for i in range(len(paths)):
                    if not os.path.isabs(paths[i]):
                        paths[i] = os.path.abspath(paths[i])
        def unix_wrap_single_compile(
            obj, src, ext, cc_args, extra_postargs, pp_opts
        ) -> None:
            # Per-file compile hook: route .cpp through icpx and .c through
            # icx, then restore the original compiler afterwards.
            # Copy before we make any modifications.
            cflags = copy.deepcopy(extra_postargs)
            try:
                original_compiler = self.compiler.compiler_so
                if _is_cpp_file(src):
                    _cxxbin = get_dpcpp_complier()
                    self.compiler.set_executable("compiler_so", _cxxbin)
                    if isinstance(cflags, dict):
                        cflags = cflags["cxx"]
                    else:
                        cflags = unix_dpcpp_flags(cflags)
                elif _is_c_file(src):
                    _ccbin = get_icx_complier()
                    self.compiler.set_executable("compiler_so", _ccbin)
                    if isinstance(cflags, dict):
                        cflags = cflags["cxx"]
                    else:
                        cflags = unix_dpcpp_flags(cflags)
                elif isinstance(cflags, dict):
                    cflags = cflags["cxx"]
                append_std17_if_no_std_present(cflags)
                original_compile(obj, src, ext, cc_args, cflags, pp_opts)
            finally:
                # Put the original compiler back in place.
                self.compiler.set_executable("compiler_so", original_compiler)
        def _gen_link_lib_cmd_line(
            linker,
            objects,
            target_name,
            library_dirs,
            runtime_library_dirs,
            libraries,
            extra_postargs,
        ):
            # Assemble the shared-library link command line for the given linker.
            cmd_line = []
            library_dirs_args = []
            library_dirs_args += [f"-L{x}" for x in library_dirs]
            runtime_library_dirs_args = []
            runtime_library_dirs_args += [f"-L{x}" for x in runtime_library_dirs]
            libraries_args = []
            libraries_args += [f"-l{x}" for x in libraries]
            common_args = ["-shared"]
            """
            link command formats:
            cmd = [LD common_args objects library_dirs_args runtime_library_dirs_args libraries_args
            -o target_name extra_postargs]
            """
            cmd_line += [linker]
            cmd_line += common_args
            cmd_line += objects
            cmd_line += library_dirs_args
            cmd_line += runtime_library_dirs_args
            cmd_line += libraries_args
            cmd_line += ["-o"]
            cmd_line += [target_name]
            cmd_line += extra_postargs
            return cmd_line
        def create_parent_dirs_by_path(filename):
            # mkdir -p for the file's directory, tolerating concurrent creation.
            if not os.path.exists(os.path.dirname(filename)):
                try:
                    os.makedirs(os.path.dirname(filename))
                except OSError as exc:  # Guard against race condition
                    if exc.errno != errno.EEXIST:
                        raise
        def unix_wrap_single_link_shared_object(
            objects,
            output_libname,
            output_dir=None,
            libraries=None,
            library_dirs=None,
            runtime_library_dirs=None,
            export_symbols=None,
            debug=0,
            extra_preargs=None,
            extra_postargs=None,
            build_temp=None,
            target_lang=None,
        ):
            # Link step hook: always link through the DPC++ compiler driver.
            # create output directories avoid linker error.
            create_parent_dirs_by_path(output_libname)
            _cxxbin = get_dpcpp_complier()
            cmd = _gen_link_lib_cmd_line(
                _cxxbin,
                objects,
                output_libname,
                library_dirs,
                runtime_library_dirs,
                libraries,
                extra_postargs,
            )
            return original_spawn(cmd)
        def unix_wrap_ninja_compile(
            sources,
            output_dir=None,
            macros=None,
            include_dirs=None,
            debug=0,
            extra_preargs=None,
            extra_postargs=None,
            depends=None,
        ):
            r"""Compiles sources by outputting a ninja file and running it."""
            # NB: I copied some lines from self.compiler (which is an instance
            # of distutils.UnixCCompiler). See the following link.
            # https://github.com/python/cpython/blob/f03a8f8d5001963ad5b5b28dbd95497e9cc15596/Lib/distutils/ccompiler.py#L564-L567
            # This can be fragile, but a lot of other repos also do this
            # (see https://github.com/search?q=_setup_compile&type=Code)
            # so it is probably OK; we'll also get CI signal if/when
            # we update our python version (which is when distutils can be
            # upgraded)
            # Use absolute path for output_dir so that the object file paths
            # (`objects`) get generated with absolute paths.
            output_dir = os.path.abspath(output_dir)
            # See Note [Absolute include_dirs]
            convert_to_absolute_paths_inplace(self.compiler.include_dirs)
            _, objects, extra_postargs, pp_opts, _ = self.compiler._setup_compile(
                output_dir, macros, include_dirs, sources, depends, extra_postargs
            )
            common_cflags = self.compiler._get_cc_args(pp_opts, debug, extra_preargs)
            extra_cc_cflags = self.compiler.compiler_so[1:]
            # extra_postargs can be either:
            # - a dict mapping cxx to extra flags
            # - a list of extra flags.
            if isinstance(extra_postargs, dict):
                post_cflags = extra_postargs["cxx"]
            else:
                post_cflags = list(extra_postargs)
            append_std17_if_no_std_present(post_cflags)
            _write_ninja_file_and_compile_objects(
                sources=sources,
                objects=objects,
                cflags=[shlex.quote(f) for f in extra_cc_cflags + common_cflags],
                post_cflags=[shlex.quote(f) for f in post_cflags],
                build_directory=output_dir,
                verbose=True,
            )
            # Return *all* object filenames, not just the ones we just built.
            return objects
        if self.compiler.compiler_type == "msvc":
            # NOTE(review): ``raise "Not implemented"`` raises TypeError at
            # runtime (strings are not exceptions); should be
            # ``raise NotImplementedError(...)``.
            raise "Not implemented"
        else:
            if self.use_ninja:
                self.compiler.compile = unix_wrap_ninja_compile
            else:
                self.compiler._compile = unix_wrap_single_compile
                self.compiler.link_shared_object = unix_wrap_single_link_shared_object
        build_ext.build_extensions(self)
    def _add_compile_flag(self, extension, flag):
        # Append ``flag`` for every language in extra_compile_args (deep-copied
        # so sibling extensions sharing the list are not mutated).
        extension.extra_compile_args = copy.deepcopy(extension.extra_compile_args)
        if isinstance(extension.extra_compile_args, dict):
            for args in extension.extra_compile_args.values():
                args.append(flag)
        else:
            extension.extra_compile_args.append(flag)
    def _define_torch_extension_name(self, extension):
        # pybind11 doesn't support dots in the names
        # so in order to support extensions in the packages
        # like torch._C, we take the last part of the string
        # as the library name
        names = extension.name.split(".")
        name = names[-1]
        define = f"-DTORCH_EXTENSION_NAME={name}"
        self._add_compile_flag(extension, define)
    def _add_gnu_cpp_abi_flag(self, extension):
        # use the same CXX ABI as what PyTorch was compiled with
        self._add_compile_flag(
            extension,
            "-D_GLIBCXX_USE_CXX11_ABI=" + str(int(torch._C._GLIBCXX_USE_CXX11_ABI)),
        )
# On Windows, decode subprocess output with the OEM code page; default elsewhere.
SUBPROCESS_DECODE_ARGS = ("oem",) if IS_WINDOWS else ()
# Warning template shown when the detected compiler may be ABI-incompatible
# with the PyTorch binary; the "{}" placeholder receives "<compiler> <version>".
ABI_INCOMPATIBILITY_WARNING = """
!! WARNING !!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
Your compiler ({}) may be ABI-incompatible with PyTorch!
Please use a compiler that is ABI-compatible with GCC 5.0 and above.
See https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html.
See https://gist.github.com/goldsborough/d466f43e8ffc948ff92de7486c5216d6
for instructions on how to install GCC 5 or higher.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!! WARNING !!
"""
# Warning template shown when the user's compiler does not match the toolchain
# PyTorch was built with. (Fixed duplicated word in the message: "to to" -> "to".)
WRONG_COMPILER_WARNING = """
!! WARNING !!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
Your compiler ({user_compiler}) is not compatible with the compiler Pytorch was
built with for this platform, which is {pytorch_compiler} on {platform}. Please
use {pytorch_compiler} to compile your extension. Alternatively, you may
compile PyTorch from source using {user_compiler}, and then you can also use
{user_compiler} to compile your extension.
See https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md for help
with compiling PyTorch from source.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!! WARNING !!
"""
# Matches "built from source" torch versions like "2.1.0a0+git1234"
# (X.Y.Z<suffix>+<local-build-id>).
BUILT_FROM_SOURCE_VERSION_PATTERN = re.compile(r"\d+\.\d+\.\d+\w+\+\w+")
def _is_binary_build() -> bool:
    # A version like "2.1.0a0+git..." indicates a from-source build; anything
    # else is treated as an official binary build (ABI checks then apply).
    return not BUILT_FROM_SOURCE_VERSION_PATTERN.match(torch.version.__version__)
def _accepted_compilers_for_platform() -> List[str]:
    """Compiler basenames accepted on this platform: clang on macOS, icpx/icx elsewhere."""
    if IS_MACOS:
        return ["clang++", "clang"]
    return ["icpx", "icx"]
def check_compiler_ok_for_platform(compiler: str) -> bool:
    r"""
    Verifies that the compiler is the expected one for the current platform.

    Args:
        compiler (str): The compiler executable to check.

    Returns:
        True if the compiler is gcc/g++ on Linux or clang/clang++ on macOS,
        and always True for Windows.
    """
    if IS_WINDOWS:
        return True
    which = subprocess.check_output(["which", compiler], stderr=subprocess.STDOUT)
    # Use os.path.realpath to resolve any symlinks, in particular from 'c++' to e.g. 'g++'.
    compiler_path = os.path.realpath(which.decode(*SUBPROCESS_DECODE_ARGS).strip())
    # Check the compiler name
    if any(name in compiler_path for name in _accepted_compilers_for_platform()):
        return True
    # If compiler wrapper is used try to infer the actual compiler by invoking it with -v flag
    version_string = subprocess.check_output(
        [compiler, "-v"], stderr=subprocess.STDOUT
    ).decode(*SUBPROCESS_DECODE_ARGS)
    if IS_LINUX:
        # Check for 'gcc' or 'g++' for sccache warpper
        pattern = re.compile("^COLLECT_GCC=(.*)$", re.MULTILINE)
        results = re.findall(pattern, version_string)
        if len(results) != 1:
            return False
        compiler_path = os.path.realpath(results[0].strip())
        # On RHEL/CentOS c++ is a gcc compiler wrapper
        if os.path.basename(compiler_path) == "c++" and "gcc version" in version_string:
            return True
        return any(name in compiler_path for name in _accepted_compilers_for_platform())
    if IS_MACOS:
        # Check for 'clang' or 'clang++'
        return version_string.startswith("Apple clang")
    return False
def check_compiler_abi_compatibility(compiler) -> bool:
    r"""
    Verifies that the given compiler is ABI-compatible with PyTorch.

    Args:
        compiler (str): The compiler executable name to check (e.g. ``g++``).
            Must be executable in a shell process.

    Returns:
        False if the compiler is (likely) ABI-incompatible with PyTorch,
        else True.
    """
    # From-source builds use whatever compiler the user chose; nothing to check.
    if not _is_binary_build():
        return True
    # Explicit user opt-out of the ABI check.
    if os.environ.get("TORCH_DONT_CHECK_COMPILER_ABI") in [
        "ON",
        "1",
        "YES",
        "TRUE",
        "Y",
    ]:
        return True
    # First check if the compiler is one of the expected ones for the particular platform.
    if not check_compiler_ok_for_platform(compiler):
        warnings.warn(
            WRONG_COMPILER_WARNING.format(
                user_compiler=compiler,
                pytorch_compiler=_accepted_compilers_for_platform()[0],
                platform=sys.platform,
            )
        )
        return False
    if IS_MACOS:
        # There is no particular minimum version we need for clang, so we're good here.
        return True
    try:
        if IS_LINUX:
            minimum_required_version = MINIMUM_GCC_VERSION
            versionstr = subprocess.check_output(
                [compiler, "-dumpfullversion", "-dumpversion"]
            )
            version = versionstr.decode(*SUBPROCESS_DECODE_ARGS).strip().split(".")
        else:
            minimum_required_version = MINIMUM_MSVC_VERSION
            compiler_info = subprocess.check_output(compiler, stderr=subprocess.STDOUT)
            match = re.search(
                r"(\d+)\.(\d+)\.(\d+)",
                compiler_info.decode(*SUBPROCESS_DECODE_ARGS).strip(),
            )
            version = ["0", "0", "0"] if match is None else list(match.groups())
    except Exception:
        # Version probing failed: report why and treat as incompatible.
        _, error, _ = sys.exc_info()
        warnings.warn(f"Error checking compiler version for {compiler}: {error}")
        return False
    if tuple(map(int, version)) >= minimum_required_version:
        return True
    compiler = f'{compiler} {".".join(version)}'
    warnings.warn(ABI_INCOMPATIBILITY_WARNING.format(compiler))
    return False
def get_compiler_abi_compatibility_and_version(compiler) -> Tuple[bool, TorchVersion]:
    r"""
    Determine if the given compiler is ABI-compatible with PyTorch alongside
    its version.

    Args:
        compiler (str): The compiler executable name to check (e.g. ``g++``).
            Must be executable in a shell process.

    Returns:
        A tuple that contains a boolean that defines if the compiler is (likely)
        ABI-incompatible with PyTorch, followed by a `TorchVersion` string with
        the compiler version separated by dots ("0.0.0" when no version was
        determined).
    """
    no_version = TorchVersion("0.0.0")
    # Non-binary (source) builds skip the check entirely.
    if not _is_binary_build():
        return (True, no_version)
    # Users may opt out explicitly via the environment.
    if os.environ.get("TORCH_DONT_CHECK_COMPILER_ABI") in (
        "ON",
        "1",
        "YES",
        "TRUE",
        "Y",
    ):
        return (True, no_version)
    # The compiler must first be an accepted one for this platform.
    if not check_compiler_ok_for_platform(compiler):
        warnings.warn(
            WRONG_COMPILER_WARNING.format(
                user_compiler=compiler,
                pytorch_compiler=_accepted_compilers_for_platform()[0],
                platform=sys.platform,
            )
        )
        return (False, no_version)
    if IS_MACOS:
        # Any clang version is acceptable on macOS.
        return (True, no_version)
    try:
        if IS_LINUX:
            minimum_required_version = MINIMUM_GCC_VERSION
            raw = subprocess.check_output(
                [compiler, "-dumpfullversion", "-dumpversion"]
            )
            version = raw.decode(*SUBPROCESS_DECODE_ARGS).strip().split(".")
        else:
            minimum_required_version = MINIMUM_MSVC_VERSION
            banner = subprocess.check_output(compiler, stderr=subprocess.STDOUT)
            found = re.search(
                r"(\d+)\.(\d+)\.(\d+)",
                banner.decode(*SUBPROCESS_DECODE_ARGS).strip(),
            )
            version = list(found.groups()) if found is not None else ["0", "0", "0"]
    except Exception:
        _, error, _ = sys.exc_info()
        warnings.warn(f"Error checking compiler version for {compiler}: {error}")
        return (False, no_version)
    version_str = ".".join(version)
    if tuple(map(int, version)) >= minimum_required_version:
        return (True, TorchVersion(version_str))
    # Known compiler, but too old: warn with the exact version we saw.
    warnings.warn(ABI_INCOMPATIBILITY_WARNING.format(f"{compiler} {version_str}"))
    return (False, TorchVersion(version_str))
def _write_ninja_file_and_compile_objects(
    sources: List[str],
    objects,
    cflags,
    post_cflags,
    build_directory: str,
    verbose: bool,
) -> None:
    """Emit a build.ninja for *sources* and run ninja to compile them.

    Only object files are produced; no link step is performed (callers link
    the resulting objects themselves).
    """
    verify_ninja_availability()
    # On Windows fall back to cl/CXX; otherwise use the DPC++ compiler.
    compiler = os.environ.get("CXX", "cl") if IS_WINDOWS else get_dpcpp_complier()
    # Warns on ABI mismatch; the result is intentionally not acted upon here.
    get_compiler_abi_compatibility_and_version(compiler)

    build_file_path = os.path.join(build_directory, "build.ninja")
    if verbose:
        print(f"Emitting ninja build file {build_file_path}...")
    _write_ninja_file(
        path=build_file_path,
        cflags=cflags,
        post_cflags=post_cflags,
        sources=sources,
        objects=objects,
        ldflags=None,
        library_target=None,
    )
    if verbose:
        print("Compiling objects...")
    _run_ninja_build(
        build_directory,
        verbose,
        # It would be better if we could tell users the name of the extension
        # that failed to build but there isn't a good way to get it here.
        error_prefix="Error compiling objects for extension",
    )
def _write_ninja_file_and_build_library(
    name,
    sources: List[str],
    extra_cflags,
    extra_ldflags,
    extra_include_paths,
    build_directory: str,
    verbose: bool,
    is_standalone: bool = False,
) -> None:
    """Emit a build.ninja for extension *name* and run ninja to build it.

    When ``is_standalone`` is True an executable is produced instead of a
    shared library.
    """
    verify_ninja_availability()
    # On Windows fall back to cl/CXX; otherwise use the DPC++ compiler.
    compiler = os.environ.get("CXX", "cl") if IS_WINDOWS else get_dpcpp_complier()
    check_compiler_abi_compatibility(compiler)

    extra_ldflags = _prepare_ldflags(extra_ldflags or [], verbose, is_standalone)
    extra_cflags = _prepare_compile_flags(extra_cflags)
    build_file_path = os.path.join(build_directory, "build.ninja")
    if verbose:
        print(f"Emitting ninja build file {build_file_path}...")
    # NOTE: Emitting a new ninja build file does not cause re-compilation if
    # the sources did not change, so it's ok to re-emit (and it's fast).
    _write_ninja_file_to_build_library(
        path=build_file_path,
        name=name,
        sources=sources,
        extra_cflags=extra_cflags or [],
        extra_ldflags=extra_ldflags or [],
        extra_include_paths=extra_include_paths or [],
        is_standalone=is_standalone,
    )
    if verbose:
        print(f"Building extension module {name}...")
    _run_ninja_build(
        build_directory, verbose, error_prefix=f"Error building extension '{name}'"
    )
def get_one_api_help():
    """Construct the oneAPI environment helper (validates env configuration)."""
    return _one_api_help()
def include_paths() -> List[str]:
    """
    Get the include paths required to build a DPC++ extension.

    Returns:
        A list of include path strings: PyTorch headers first, followed by
        the oneAPI component headers.
    """
    return [*get_pytorch_include_dir(), *get_one_api_help().get_include_dirs()]
def library_paths() -> List[str]:
    """Return library search paths for linking: PyTorch's followed by oneAPI's."""
    return [*get_pytorch_lib_dir(), *get_one_api_help().get_library_dirs()]
def _prepare_compile_flags(extra_compile_args):
if isinstance(extra_compile_args, List):
extra_compile_args.append("-fsycl")
elif isinstance(extra_compile_args, dict):
cl_flags = extra_compile_args.get("cxx", [])
cl_flags.append("-fsycl")
extra_compile_args["cxx"] = cl_flags
return extra_compile_args
def _prepare_ldflags(extra_ldflags, verbose, is_standalone):
    """Append torch, oneAPI and IPEX link flags to *extra_ldflags* (in place)."""
    if IS_WINDOWS:
        python_path = os.path.dirname(sys.executable)
        python_lib_path = os.path.join(python_path, "libs")
        extra_ldflags += ["c10.lib", "torch.lib", f"/LIBPATH:{TORCH_LIB_PATH}"]
        if not is_standalone:
            extra_ldflags += ["torch_python.lib", f"/LIBPATH:{python_lib_path}"]
    else:
        extra_ldflags += [f"-L{TORCH_LIB_PATH}", "-lc10", "-ltorch"]
        if not is_standalone:
            extra_ldflags.append("-ltorch_python")
        if is_standalone and "TBB" in torch.__config__.parallel_info():
            extra_ldflags.append("-ltbb")
        if is_standalone:
            extra_ldflags.append(f"-Wl,-rpath,{TORCH_LIB_PATH}")

    one_api = get_one_api_help()
    # Append oneMKL link parameters, detailed please reference:
    # https://www.intel.com/content/www/us/en/developer/tools/oneapi/onemkl-link-line-advisor.html
    oneapi_link_args = [f"-L{d}" for d in library_paths()]
    # oneapi_link_args += ['-fsycl-device-code-split=per_kernel']
    # Static MKL archives are wrapped in a group so the linker resolves their
    # mutual dependencies regardless of order.
    oneapi_link_args.append("-Wl,--start-group")
    oneapi_link_args += [f"{lib}" for lib in one_api.get_onemkl_libraries()]
    oneapi_link_args.append("-Wl,--end-group")
    oneapi_link_args += ["-lsycl", "-lOpenCL", "-lpthread", "-lm", "-ldl"]
    oneapi_link_args.append("-ldnnl")
    # Append IPEX link parameters.
    oneapi_link_args += [f"-L{d}" for d in one_api.get_default_lib_dir()]
    oneapi_link_args.append("-lintel-ext-pt-gpu")

    extra_ldflags += oneapi_link_args
    return extra_ldflags
# Maps distutils platform tags to the target-architecture argument expected by
# MSVC's vcvars scripts when activating the Visual C++ environment on Windows.
PLAT_TO_VCVARS = {
    "win32": "x86",
    "win-amd64": "x86_amd64",
}
def _get_num_workers(verbose: bool) -> Optional[int]:
max_jobs = os.environ.get("MAX_JOBS")
if max_jobs is not None and max_jobs.isdigit():
if verbose:
print(f"Using envvar MAX_JOBS ({max_jobs}) as the number of workers...")
return int(max_jobs)
if verbose:
print(
"Allowing ninja to set a default number of workers... "
"(overridable by setting the environment variable MAX_JOBS=N)"
)
return None
def _run_ninja_build(build_directory: str, verbose: bool, error_prefix: str) -> None:
    """Invoke ninja in ``build_directory`` and raise RuntimeError on failure.

    Args:
        build_directory: directory containing the generated ``build.ninja``.
        verbose: if True, stream ninja's output to stdout instead of capturing it.
        error_prefix: human-readable prefix for the RuntimeError raised when
            the build fails; ninja's captured output is appended to it.
    """
    command = ["ninja", "-v"]
    num_workers = _get_num_workers(verbose)
    if num_workers is not None:
        command.extend(["-j", str(num_workers)])
    env = os.environ.copy()
    # Try to activate the vc env for the users
    if IS_WINDOWS and "VSCMD_ARG_TGT_ARCH" not in env:
        from setuptools import distutils

        plat_name = distutils.util.get_platform()
        plat_spec = PLAT_TO_VCVARS[plat_name]
        vc_env = distutils._msvccompiler._get_vc_env(plat_spec)
        vc_env = {k.upper(): v for k, v in vc_env.items()}
        # Keep the caller's environment for any variable vcvars did not set.
        for k, v in env.items():
            uk = k.upper()
            if uk not in vc_env:
                vc_env[uk] = v
        env = vc_env
    try:
        sys.stdout.flush()
        sys.stderr.flush()
        # Warning: don't pass stdout=None to subprocess.run to get output.
        # subprocess.run assumes that sys.__stdout__ has not been modified and
        # attempts to write to it by default. However, when we call _run_ninja_build
        # from ahead-of-time cpp extensions, the following happens:
        # 1) If the stdout encoding is not utf-8, setuptools detachs __stdout__.
        # https://github.com/pypa/setuptools/blob/7e97def47723303fafabe48b22168bbc11bb4821/setuptools/dist.py#L1110
        # (it probably shouldn't do this)
        # 2) subprocess.run (on POSIX, with no stdout override) relies on
        # __stdout__ not being detached:
        # https://github.com/python/cpython/blob/c352e6c7446c894b13643f538db312092b351789/Lib/subprocess.py#L1214
        # To work around this, we pass in the fileno directly and hope that
        # it is valid.
        stdout_fileno = 1
        subprocess.run(
            command,
            stdout=stdout_fileno if verbose else subprocess.PIPE,
            stderr=subprocess.STDOUT,
            cwd=build_directory,
            check=True,
            env=env,
        )
    except subprocess.CalledProcessError as e:
        # Python 2 and 3 compatible way of getting the error object.
        _, error, _ = sys.exc_info()
        # error.output contains the stdout and stderr of the build attempt.
        message = error_prefix
        # `error` is a CalledProcessError (which has an `ouput`) attribute, but
        # mypy thinks it's Optional[BaseException] and doesn't narrow
        if hasattr(error, "output") and error.output:  # type: ignore[union-attr]
            message += f": {error.output.decode(*SUBPROCESS_DECODE_ARGS)}"  # type: ignore[union-attr]
        raise RuntimeError(message) from e
def _get_build_directory(name: str, verbose: bool) -> str:
root_extensions_directory = os.environ.get("TORCH_EXTENSIONS_DIR")
if root_extensions_directory is None:
root_extensions_directory = get_default_build_root()
# TODO: hard code as xpu. will check xpu_available when it is ready.
xpu_str = "xpu" # type: ignore[attr-defined]
python_version = f"py{sys.version_info.major}{sys.version_info.minor}"
build_folder = f"{python_version}_{xpu_str}"
root_extensions_directory = os.path.join(
root_extensions_directory, build_folder
)
if verbose:
print(f"Using {root_extensions_directory} as PyTorch extensions root...")
build_directory = os.path.join(root_extensions_directory, name)
if not os.path.exists(build_directory):
if verbose:
print(f"Creating extension directory {build_directory}...")
# This is like mkdir -p, i.e. will also create parent directories.
os.makedirs(build_directory, exist_ok=True)
return build_directory
def _import_module_from_library(module_name, path, is_python_module):
    """Load the built extension: as a Python module, or via torch.ops.load_library."""
    filepath = os.path.join(path, f"{module_name}{LIB_EXT}")
    if not is_python_module:
        # Only register the custom operators contained in the library.
        torch.ops.load_library(filepath)
        return None
    # https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path
    spec = importlib.util.spec_from_file_location(module_name, filepath)
    module = importlib.util.module_from_spec(spec)
    assert isinstance(spec.loader, importlib.abc.Loader)
    spec.loader.exec_module(module)
    return module
def _write_ninja_file_to_build_library(
    path, name, sources, extra_cflags, extra_ldflags, extra_include_paths, is_standalone
) -> None:
    """Emit a build.ninja that compiles ``sources`` and links them into the
    extension target named ``name``.

    Args:
        path: where to write the ninja file.
        name: target name, also used for ``-DTORCH_EXTENSION_NAME``.
        sources: C++ source files to compile.
        extra_cflags: user compile flags, appended after the common flags.
        extra_ldflags: user link flags.
        extra_include_paths: user include directories (passed with ``-I``).
        is_standalone: when True, link an executable instead of a shared library.
    """
    extra_cflags = [flag.strip() for flag in extra_cflags]
    extra_ldflags = [flag.strip() for flag in extra_ldflags]
    extra_include_paths = [flag.strip() for flag in extra_include_paths]
    # Turn into absolute paths so we can emit them into the ninja build
    # file wherever it is.
    user_includes = [os.path.abspath(file) for file in extra_include_paths]
    # include_paths() gives us the location of torch/extension.h
    system_includes = include_paths()
    # sysconfig.get_path('include') gives us the location of Python.h
    # Explicitly specify 'posix_prefix' scheme on non-Windows platforms to workaround error on some MacOS
    # installations where default `get_path` points to non-existing `/Library/Python/M.m/include` folder
    python_include_path = sysconfig.get_path(
        "include", scheme="nt" if IS_WINDOWS else "posix_prefix"
    )
    if python_include_path is not None:
        system_includes.append(python_include_path)
    # Windows does not understand `-isystem`.
    if IS_WINDOWS:
        user_includes += system_includes
        system_includes.clear()
    common_cflags = []
    if not is_standalone:
        common_cflags.append(f"-DTORCH_EXTENSION_NAME={name}")
        common_cflags.append("-DTORCH_API_INCLUDE_EXTENSION_H")
    # Note [Pybind11 ABI constants]
    #
    # Pybind11 before 2.4 used to build an ABI strings using the following pattern:
    # f"__pybind11_internals_v{PYBIND11_INTERNALS_VERSION}{PYBIND11_INTERNALS_KIND}{PYBIND11_BUILD_TYPE}__"
    # Since 2.4 compier type, stdlib and build abi parameters are also encoded like this:
    # f"__pybind11_internals_v{PYBIND11_INTERNALS_VERSION}{PYBIND11_INTERNALS_KIND}{PYBIND11_COMPILER_TYPE}
    # {PYBIND11_STDLIB}{PYBIND11_BUILD_ABI}{PYBIND11_BUILD_TYPE}__"
    #
    # This was done in order to further narrow down the chances of compiler ABI incompatibility
    # that can cause a hard to debug segfaults.
    # For PyTorch extensions we want to relax those restrictions and pass compiler, stdlib and abi properties
    # captured during PyTorch native library compilation in torch/csrc/Module.cpp
    for pname in ["COMPILER_TYPE", "STDLIB", "BUILD_ABI"]:
        pval = getattr(torch._C, f"_PYBIND11_{pname}")
        if pval is not None and not IS_WINDOWS:
            common_cflags.append(f'-DPYBIND11_{pname}=\\"{pval}\\"')
    common_cflags += [f"-I{include}" for include in user_includes]
    common_cflags += [f"-isystem {include}" for include in system_includes]
    # Must match the C++ ABI flag PyTorch itself was built with.
    common_cflags += [
        "-D_GLIBCXX_USE_CXX11_ABI=" + str(int(torch._C._GLIBCXX_USE_CXX11_ABI))
    ]
    if IS_WINDOWS:
        cflags = common_cflags + COMMON_MSVC_FLAGS + extra_cflags
        cflags = _nt_quote_args(cflags)
    else:
        cflags = common_cflags + ["-fPIC", "-std=c++17"] + extra_cflags

    def object_file_path(source_file: str) -> str:
        # '/path/to/file.cpp' -> 'file'
        file_name = os.path.splitext(os.path.basename(source_file))[0]
        target = f"{file_name}.o"
        return target

    objects = [object_file_path(src) for src in sources]
    ldflags = ([] if is_standalone else [SHARED_FLAG]) + extra_ldflags
    # The darwin linker needs explicit consent to ignore unresolved symbols.
    if IS_MACOS:
        ldflags.append("-undefined dynamic_lookup")
    elif IS_WINDOWS:
        ldflags = _nt_quote_args(ldflags)
    ext = EXEC_EXT if is_standalone else LIB_EXT
    library_target = f"{name}{ext}"
    _write_ninja_file(
        path=path,
        cflags=cflags,
        post_cflags=None,
        sources=sources,
        objects=objects,
        ldflags=ldflags,
        library_target=library_target,
    )
def _jit_compile(
    name,
    sources,
    extra_cflags,
    extra_ldflags,
    extra_include_paths,
    build_directory: str,
    verbose: bool,
    is_python_module,
    is_standalone,
    keep_intermediates=True,
) -> None:
    """Build (if needed) and load a JIT extension.

    The extension versioner detects source/flag changes and bumps the build
    version; a file baton ensures only one process builds while concurrent
    processes wait for the artifact.

    Returns the imported module (``is_python_module``), the executable path
    (``is_standalone``), or ``None`` after loading the library via torch.ops.
    NOTE(review): the ``-> None`` annotation understates the actual returns.

    Raises:
        ValueError: if both ``is_python_module`` and ``is_standalone`` are set.
    """
    if is_python_module and is_standalone:
        raise ValueError(
            "`is_python_module` and `is_standalone` are mutually exclusive."
        )
    old_version = JIT_EXTENSION_VERSIONER.get_version(name)
    version = JIT_EXTENSION_VERSIONER.bump_version_if_changed(
        name,
        sources,
        build_arguments=[extra_cflags, extra_ldflags, extra_include_paths],
        build_directory=build_directory,
        with_cuda=False,
        is_python_module=is_python_module,
        is_standalone=is_standalone,
    )
    if version > 0:
        if version != old_version and verbose:
            print(
                f"The input conditions for extension module {name} have changed. "
                + f"Bumping to version {version} and re-building as {name}_v{version}..."
            )
        name = f"{name}_v{version}"
    if version != old_version:
        # Only the process that wins the baton builds; the others block in
        # wait() and then load the artifact the winner produced.
        baton = FileBaton(os.path.join(build_directory, "lock"))
        if baton.try_acquire():
            try:
                with GeneratedFileCleaner(
                    keep_intermediates=keep_intermediates
                ) as clean_ctx:
                    _write_ninja_file_and_build_library(
                        name=name,
                        sources=sources,
                        extra_cflags=extra_cflags or [],
                        extra_ldflags=extra_ldflags or [],
                        extra_include_paths=extra_include_paths or [],
                        build_directory=build_directory,
                        verbose=verbose,
                        is_standalone=is_standalone,
                    )
            finally:
                baton.release()
        else:
            baton.wait()
    elif verbose:
        print(
            "No modifications detected for re-loaded extension "
            f"module {name}, skipping build step..."
        )
    if verbose:
        print(f"Loading extension module {name}...")
    if is_standalone:
        return _get_exec_path(name, build_directory)
    return _import_module_from_library(name, build_directory, is_python_module)
def load(
    name,
    sources: Union[str, List[str]],
    extra_cflags=None,
    extra_ldflags=None,
    extra_include_paths=None,
    build_directory=None,
    verbose=False,
    is_python_module=True,
    is_standalone=False,
    keep_intermediates=True,
):
    r"""
    Loads an intel_extension_for_pytorch DPC++ extension just-in-time (JIT).

    To load an extension, a Ninja build file is emitted, which is used to
    compile the given sources into a dynamic library. This library is
    subsequently loaded into the current Python process as a module and
    returned from this function, ready for use.

    By default, the directory to which the build file is emitted and the
    resulting library compiled to is ``<tmp>/torch_extensions/<name>``, where
    ``<tmp>`` is the temporary folder on the current platform and ``<name>``
    the name of the extension. This location can be overridden in two ways.
    First, if the ``TORCH_EXTENSIONS_DIR`` environment variable is set, it
    replaces ``<tmp>/torch_extensions`` and all extensions will be compiled
    into subfolders of this directory. Second, if the ``build_directory``
    argument to this function is supplied, it overrides the entire path, i.e.
    the library will be compiled into that folder directly.

    To compile the sources, the default system compiler (``c++``) is used,
    which can be overridden by setting the ``CXX`` environment variable. To pass
    additional arguments to the compilation process, ``extra_cflags`` or
    ``extra_ldflags`` can be provided. For example, to compile your extension
    with optimizations, pass ``extra_cflags=['-O3']``. You can also use
    ``extra_cflags`` to pass further include directories.

    Args:
        name: The name of the extension to build. This MUST be the same as the
            name of the pybind11 module!
        sources: A list of relative or absolute paths to C++ source files.
        extra_cflags: optional list of compiler flags to forward to the build.
        extra_ldflags: optional list of linker flags to forward to the build.
        extra_include_paths: optional list of include directories to forward
            to the build.
        build_directory: optional path to use as build workspace.
        verbose: If ``True``, turns on verbose logging of load steps.
        is_python_module: If ``True`` (default), imports the produced shared
            library as a Python module. If ``False``, behavior depends on
            ``is_standalone``.
        is_standalone: If ``False`` (default) loads the constructed extension
            into the process as a plain dynamic library. If ``True``, build a
            standalone executable.

    Returns:
        If ``is_python_module`` is ``True``:
            Returns the loaded PyTorch extension as a Python module.

        If ``is_python_module`` is ``False`` and ``is_standalone`` is ``False``:
            Returns nothing. (The shared library is loaded into the process as
            a side effect.)

        If ``is_standalone`` is ``True``:
            Returns the path to the executable. (On Windows, TORCH_LIB_PATH is
            added to the PATH environment variable as a side effect.)

    Example:
        >>> from intel_extension_for_pytorch.xpu.cpp_extension import load
        >>> module = load(
                name='extension',
                sources=['extension.cpp', 'extension_kernel.cpp'],
                extra_cflags=['-O2'],
                verbose=True)
    """
    return _jit_compile(
        name,
        [sources] if isinstance(sources, str) else sources,
        extra_cflags,
        extra_ldflags,
        extra_include_paths,
        build_directory or _get_build_directory(name, verbose),
        verbose,
        is_python_module,
        is_standalone,
        keep_intermediates=keep_intermediates,
    )
def _write_ninja_file(
    path, cflags, post_cflags, sources, objects, ldflags, library_target
) -> None:
    r"""Write a ninja file that does the desired compiling and linking.

    `path`: Where to write this file
    `cflags`: list of flags to pass to $cxx. Can be None.
    `post_cflags`: list of flags to append to the $cxx invocation. Can be None.
    `sources`: list of paths to source files
    `objects`: list of desired paths to objects, one per source.
    `ldflags`: list of flags to pass to linker. Can be None.
    `library_target`: Name of the output library. Can be None; in that case,
                      we do no linking.
    """

    def sanitize_flags(flags):
        # Treat None as "no flags" and strip stray whitespace from each flag.
        if flags is None:
            return []
        else:
            return [flag.strip() for flag in flags]

    cflags = sanitize_flags(cflags)
    post_cflags = sanitize_flags(post_cflags)
    ldflags = sanitize_flags(ldflags)
    # Sanity checks...
    assert len(sources) == len(objects)
    assert len(sources) > 0
    if IS_WINDOWS:
        compiler = os.environ.get("CXX", "cl")
    else:
        compiler = get_dpcpp_complier()
    # Version 1.3 is required for the `deps` directive.
    config = ["ninja_required_version = 1.3"]
    config.append(f"cxx = {compiler}")
    flags = [f'cflags = {" ".join(cflags)}']
    flags.append(f'post_cflags = {" ".join(post_cflags)}')
    flags.append(f'ldflags = {" ".join(ldflags)}')
    # Turn into absolute paths so we can emit them into the ninja build
    # file wherever it is.
    sources = [os.path.abspath(file) for file in sources]
    # See https://ninja-build.org/build.ninja.html for reference.
    compile_rule = ["rule compile"]
    if IS_WINDOWS:
        compile_rule.append(
            " command = cl /showIncludes $cflags -c $in /Fo$out $post_cflags"
        )
        compile_rule.append(" deps = msvc")
    else:
        compile_rule.append(
            " command = $cxx -MMD -MF $out.d $cflags -c $in -o $out $post_cflags"
        )
        compile_rule.append(" depfile = $out.d")
        compile_rule.append(" deps = gcc")
    # Emit one build rule per source to enable incremental build.
    build = []
    for source_file, object_file in zip(sources, objects):
        rule = "compile"
        if IS_WINDOWS:
            # Ninja requires ':' and ' ' in paths to be escaped with '$'.
            source_file = source_file.replace(":", "$:")
            object_file = object_file.replace(":", "$:")
        source_file = source_file.replace(" ", "$ ")
        object_file = object_file.replace(" ", "$ ")
        build.append(f"build {object_file}: {rule} {source_file}")
    if library_target is not None:
        link_rule = ["rule link"]
        if IS_WINDOWS:
            cl_paths = (
                subprocess.check_output(["where", "cl"])
                .decode(*SUBPROCESS_DECODE_ARGS)
                .split("\r\n")
            )
            if len(cl_paths) >= 1:
                cl_path = os.path.dirname(cl_paths[0]).replace(":", "$:")
            else:
                raise RuntimeError("MSVC is required to load C++ extensions")
            link_rule.append(
                f' command = "{cl_path}/link.exe" $in /nologo $ldflags /out:$out'
            )
        else:
            link_rule.append(" command = $cxx $in $ldflags -o $out")
        link = [f'build {library_target}: link {" ".join(objects)}']
        default = [f"default {library_target}"]
    else:
        link_rule, link, default = [], [], []
    # 'Blocks' should be separated by newlines, for visual benefit.
    blocks = [config, flags, compile_rule]
    blocks += [link_rule, build, link, default]
    with open(path, "w") as build_file:
        for block in blocks:
            lines = "\n".join(block)
            build_file.write(f"{lines}\n\n")
def _get_dpcpp_root():
# TODO: Need to decouple with toolchain env scripts
dpcpp_root = os.getenv("CMPLR_ROOT")
return dpcpp_root
def _get_onemkl_root():
# TODO: Need to decouple with toolchain env scripts
path = os.getenv("MKLROOT")
return path
def _get_onednn_root():
# TODO: Need to decouple with toolchain env scripts
path = os.getenv("DNNLROOT")
return path
class _one_api_help:
    """Locates the oneAPI components (DPC++ compiler, oneMKL, oneDNN) needed to
    build a DPC++ extension and exposes their include/library directories.

    Component roots are read from the environment variables published by the
    oneAPI ``vars.sh`` scripts (``CMPLR_ROOT``, ``MKLROOT``, ``DNNLROOT``).

    Raises:
        RuntimeError: on construction, if any required environment variable
            is unset.
    """

    __dpcpp_root = None
    __onemkl_root = None
    __onednn_root = None
    __default_root = None

    def __init__(self):
        self.__dpcpp_root = _get_dpcpp_root()
        self.__onemkl_root = _get_onemkl_root()
        self.__onednn_root = _get_onednn_root()

        # IPEX's own install root: the parent of this file's directory.
        CUR_DIR = os.path.dirname(__file__)
        self.__default_root = os.path.dirname(CUR_DIR)

        self.check_onednn_cfg()
        self.check_dpcpp_cfg()
        self.check_onemkl_cfg()

    # BUGFIX (all three checks below): the original code raised plain strings
    # ("raise '...'"), which is itself a TypeError in Python 3 and hides the
    # intended message. Raise RuntimeError with the same message instead.
    def check_onemkl_cfg(self):
        if self.__onemkl_root is None:
            raise RuntimeError(
                "Didn't detect mkl root. Please source <oneapi_dir>/mkl/<version>/env/vars.sh "
            )

    def check_onednn_cfg(self):
        if self.__onednn_root is None:
            raise RuntimeError(
                "Didn't detect dnnl root. Please source <oneapi_dir>/dnnl/<version>/env/vars.sh "
            )
        else:
            warnings.warn(
                "This extension has statically linked the oneDNN library. "
                "Please note that the oneDNN version on this path may not "
                "match the built-in version."
            )

    def check_dpcpp_cfg(self):
        if self.__dpcpp_root is None:
            raise RuntimeError(
                "Didn't detect dpcpp root. Please source <oneapi_dir>/compiler/<version>/env/vars.sh "
            )

    def get_default_include_dir(self):
        """Include directory shipped with IPEX itself."""
        return [os.path.join(self.__default_root, "include")]

    def get_default_lib_dir(self):
        """Library directory shipped with IPEX itself."""
        return [os.path.join(self.__default_root, "lib")]

    def get_dpcpp_include_dir(self):
        """DPC++ compiler (SYCL) include directories."""
        return [
            os.path.join(self.__dpcpp_root, "linux", "include"),
            os.path.join(self.__dpcpp_root, "linux", "include", "sycl"),
        ]

    def get_onemkl_include_dir(self):
        return [os.path.join(self.__onemkl_root, "include")]

    def get_onednn_include_dir(self):
        return [os.path.join(self.__onednn_root, "include")]

    def get_onednn_lib_dir(self):
        return [os.path.join(self.__onednn_root, "lib")]

    def is_onemkl_ready(self):
        """True when the oneMKL root was found in the environment."""
        return self.__onemkl_root is not None

    def is_onednn_ready(self):
        """True when the oneDNN root was found in the environment."""
        return self.__onednn_root is not None

    def get_library_dirs(self):
        """All library directories: IPEX defaults followed by oneDNN."""
        library_dirs = []
        library_dirs += [f"{x}" for x in self.get_default_lib_dir()]
        library_dirs += [f"{x}" for x in self.get_onednn_lib_dir()]
        return library_dirs

    def get_include_dirs(self):
        """All include directories: DPC++, oneMKL, oneDNN, then IPEX defaults."""
        include_dirs = []
        include_dirs += [f"{x}" for x in self.get_dpcpp_include_dir()]
        include_dirs += [f"{x}" for x in self.get_onemkl_include_dir()]
        include_dirs += [f"{x}" for x in self.get_onednn_include_dir()]
        include_dirs += [f"{x}" for x in self.get_default_include_dir()]
        return include_dirs

    def get_onemkl_libraries(self):
        """Static oneMKL archives, in link order (callers wrap them in
        -Wl,--start-group / --end-group)."""
        MKLROOT = self.__onemkl_root
        return [
            f"{MKLROOT}/lib/intel64/libmkl_sycl.a",
            f"{MKLROOT}/lib/intel64/libmkl_intel_ilp64.a",
            f"{MKLROOT}/lib/intel64/libmkl_sequential.a",
            f"{MKLROOT}/lib/intel64/libmkl_core.a",
        ]
def get_pytorch_include_dir():
    """Return the PyTorch header directories needed to compile an extension."""
    lib_include = os.path.join(_TORCH_PATH, "include")
    return [
        lib_include,
        # Remove this once torch/torch.h is officially no longer supported for C++ extensions.
        os.path.join(lib_include, "torch", "csrc", "api", "include"),
        # Some internal (old) Torch headers don't properly prefix their includes,
        # so we need to pass -Itorch/lib/include/TH as well.
        os.path.join(lib_include, "TH"),
    ]
def get_pytorch_lib_dir():
    """Return the directory containing PyTorch's shared libraries."""
    return [os.path.join(_TORCH_PATH, "lib")]
def DPCPPExtension(name, sources, *args, **kwargs):
    r"""
    Creates a :class:`setuptools.Extension` for DPCPP/C++.

    Convenience method that creates a :class:`setuptools.Extension` with the
    bare minimum (but often sufficient) arguments to build a DPCPP/C++
    extension. All arguments are forwarded to the
    :class:`setuptools.Extension` constructor.

    Example:
        >>> from intel_extension_for_pytorch.xpu.utils import DpcppBuildExtension, DPCPPExtension
        >>> setup(
                name='dpcpp_extension',
                ext_modules=[
                    DPCPPExtension(
                        name='dpcpp_extension',
                        sources=['extension.cpp', 'extension_kernel.cpp'],
                        extra_compile_args={'cxx': ['-g', '-std=c++20', '-fPIC']})
                ],
                cmdclass={
                    'build_ext': DpcppBuildExtension
                })
    """
    # Library search paths: user-provided ones plus torch/oneAPI defaults.
    library_dirs = kwargs.get("library_dirs", [])
    library_dirs += library_paths()
    kwargs["library_dirs"] = library_dirs

    # Link against the PyTorch runtime libraries plus oneDNN.
    libraries = kwargs.get("libraries", [])
    libraries += ["c10", "torch", "torch_cpu", "torch_python", "dnnl"]
    kwargs["libraries"] = libraries

    include_dirs = kwargs.get("include_dirs", [])
    include_dirs += include_paths()
    kwargs["include_dirs"] = include_dirs

    kwargs["language"] = "c++"

    # Fold the oneAPI-specific compile and link flags into the user's.
    extra_link_args = _prepare_ldflags(kwargs.get("extra_link_args", []), False, False)
    extra_compile_args = _prepare_compile_flags(kwargs.get("extra_compile_args", {}))
    # todo: add dpcpp parameter support.
    kwargs["extra_link_args"] = extra_link_args
    kwargs["extra_compile_args"] = extra_compile_args

    return setuptools.Extension(name, sources, *args, **kwargs)
| 55,241 | 35.901804 | 130 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/xpu/__init__.py | r"""
This package is lazily initialized, so you can always import it.
"""
from torch import serialization
from torch.storage import _StorageBase
import sys
from typing import List, Optional, Tuple, Union, Dict
import torch
import intel_extension_for_pytorch
from .lazy_init import _lazy_init, _lazy_call
from torch import device as _device
from torch._utils import classproperty
from ._proxy_module import *
from .streams import Stream, Event
from .intrinsic import *
from .cpp_extension import *
from .amp import *
from .utils import *
from .random import *
from .memory import *
from ..utils.channels_last_1d import is_contiguous_channels_last_1d, to_channels_last_1d
from .overrides import (
override_tensor_totype,
override_assert_equal,
override_get_stream,
override_recursive_to,
)
from .generator import Generator
from torch._utils import _get_device_index
import intel_extension_for_pytorch.optim as optim
from intel_extension_for_pytorch._version import (
__version__,
__ipex_gitrev__,
__torch_gitrev__,
__gpu_onednn_gitrev__,
__build_type__,
) # noqa B950
# Per-device default RNG generators; starts empty and is populated lazily
# during XPU initialization.
default_generators: Tuple[torch._C.Generator] = ()
# Anything this module's APIs accept as a device specifier.
_device_t = Union[_device, str, int]
def is_initialized():
    r"""Returns whether XPU state has been initialized."""
    # Imported at call time so we always observe the current flag value and
    # never trigger initialization ourselves.
    from .lazy_init import _initialized as _flag

    return _flag
def init():
    r"""Initialize the XPU's state.

    XPU state is normally initialized lazily on first use, so calling this
    explicitly is only needed in rare cases where initialization must happen
    up front. Repeated calls are harmless no-ops.
    """
    _lazy_init()
# This API calls _prefetchDeviceCount() if _lazy_init() has not been called,
# such that this API can be used before forking a process.
def device_count() -> int:
    r"""Returns the number of XPUs device available."""
    native = intel_extension_for_pytorch._C
    if not hasattr(native, "_getDeviceCount"):
        return 0
    # Before lazy init, use the prefetch path so forking stays safe.
    if is_initialized():
        return native._getDeviceCount()
    return native._prefetchDeviceCount()
# This API can be used before forking process if _lazy_init() has not been called.
def is_available() -> bool:
    r"""Returns a bool indicating if XPU is currently available."""
    # device_count() never throws; it reports 0 when the driver is missing or
    # cannot be initialized, so a positive count means XPU is usable.
    return device_count() > 0
# This API can be used before forking process if _lazy_init() has not been called.
def getDeviceIdListForCard(card_id=-1) -> list:
    r"""Returns the device list of card_id.

    By default, return device list of the card which contains max number of devices."""
    native = intel_extension_for_pytorch._C
    if not hasattr(native, "_getDeviceIdListForCard"):
        return []
    # Before lazy init, use the prefetch path so forking stays safe.
    if is_initialized():
        return native._getDeviceIdListForCard(card_id)
    return native._prefetchDeviceIdListForCard(card_id)
class device(object):
    r"""Context-manager that changes the selected device.

    Arguments:
        device (torch.device or int): device index to select. It's a no-op if
            this argument is a negative integer or ``None``.
    """

    def __init__(self, device):
        # -1 (from None or a negative device) marks this manager as a no-op.
        self.idx = _get_device_index(device, optional=True)
        self.prev_idx = -1

    def __enter__(self):
        if self.idx == -1:
            return
        # Remember the previously current device so __exit__ can restore it.
        self.prev_idx = intel_extension_for_pytorch._C._getDevice()
        if self.prev_idx != self.idx:
            intel_extension_for_pytorch._C._setDevice(self.idx)
        if not torch.jit.is_scripting():
            _lazy_init()

    def __exit__(self, *args):
        # Restore the previous device (no-op if the device was unchanged).
        if self.prev_idx != self.idx:
            intel_extension_for_pytorch._C._setDevice(self.prev_idx)
        # Returning False propagates any exception raised in the with-block.
        return False
class device_of(device):
    r"""Context-manager that changes the current device to that of given object.

    You can use both tensors and storages as arguments. If a given object is
    not allocated on a GPU, this is a no-op.

    Arguments:
        obj (Tensor or Storage): object allocated on the selected device.
    """

    def __init__(self, obj):
        # -1 makes the parent context manager a no-op for non-XPU objects.
        target_idx = obj.get_device() if obj.is_xpu else -1
        super(device_of, self).__init__(target_idx)
def set_device(device: _device_t) -> None:
    r"""Sets the current device.

    Usage of this function is discouraged in favor of :any:`device`. In most
    cases it's better to use ``xpu_VISIBLE_DEVICES`` environmental variable.

    Arguments:
        device (torch.device or int): selected device. This function is a no-op
            if this argument is negative.
    """
    idx = _get_device_index(device)
    # A negative index means "leave the current device untouched".
    if idx >= 0:
        intel_extension_for_pytorch._C._setDevice(idx)
def get_device_name(device: Optional[_device_t] = None) -> str:
    r"""Gets the name of a device.

    Arguments:
        device (torch.device or int, optional): device for which to return the
            name. This function is a no-op if this argument is a negative
            integer. It uses the current device, given by
            :func:`~torch.xpu.current_device`, if :attr:`device` is ``None``
            (default).
    """
    props = get_device_properties(device)
    return props.name
def get_device_capability(device: Optional[_device_t] = None) -> Dict[str, Any]:
    r"""Gets the xpu capability of a device.
    Args:
        device (torch.device or int, optional): device for which to return the
            device capability. It uses the current device, given by
            :func:`~torch.xpu.current_device`, if :attr:`device` is ``None``
            (default).
    Returns:
        Dict[str, Any]: the xpu capability dictionary of the device
    """
    props = get_device_properties(device)
    # Only the capability-related fields of the property struct are exposed.
    fields = ("max_work_group_size", "max_num_sub_groups", "sub_group_sizes")
    return {field: getattr(props, field) for field in fields}
def get_device_properties(device: _device_t):
    r"""Gets the xpu properties of a device.
    Arguments:
        device (torch.device or int, optional): device for which to return the
            device properties. It uses the current device, given by
            :func:`~torch.xpu.current_device`, if :attr:`device` is ``None``
            (default).
    Returns:
        _DeviceProperties: the properties of the device
    Raises:
        AssertionError: if the resolved index is out of range.
    """
    _lazy_init()  # will define _get_device_properties
    device = _get_device_index(device, optional=True)
    if device < 0 or device >= device_count():
        raise AssertionError("Invalid device id")
    return intel_extension_for_pytorch._C._get_device_properties(device)
def current_device() -> int:
    r"""Returns the index of a currently selected device."""
    # lazy initialization occurs in _getDevice
    return intel_extension_for_pytorch._C._getDevice()
def synchronize(device: _device_t = None) -> None:
    r"""Waits for all kernels in all streams on a XPU device to complete.
    Arguments:
        device (torch.device or int, optional): device for which to synchronize.
            It uses the current device, given by :func:`~torch.xpu.current_device`,
            if :attr:`device` is ``None`` (default).
    """
    _lazy_init()
    # optional=True: None resolves to the current device's index.
    idx = _get_device_index(device, optional=True)
    return intel_extension_for_pytorch._C._synchronize(idx)
class StreamContext(object):
    r"""Context-manager that selects a given stream.
    All XPU kernels queued within its context will be enqueued on a selected
    stream.
    Args:
        Stream (Stream): selected stream. This manager is a no-op if it's
            ``None``.
    .. note:: Streams are per-device.
    """
    cur_stream: Optional["Stream"]
    def __init__(self, stream: Optional["Stream"]):
        self.stream = stream
        # idx == -1 means "no XPU device"; the manager then becomes a no-op.
        self.idx = _get_device_index(None, True)
        if not torch.jit.is_scripting():
            if self.idx is None:
                self.idx = -1
        self.src_prev_stream = None
        self.dst_prev_stream = None
    def __enter__(self):
        # Local cur_stream variable for type refinement
        cur_stream = self.stream
        # Return if stream is None or XPU device not available
        if cur_stream is None or self.idx == -1:
            return
        # Remember the stream active on the *current* device so __exit__ can
        # restore it.
        self.src_prev_stream = current_stream(None)
        # If the stream is not on the current device, then
        # set the current stream on the device
        if self.src_prev_stream.device != cur_stream.device:
            with device(cur_stream.device):
                self.dst_prev_stream = current_stream(cur_stream.device)
        set_stream(cur_stream)
    def __exit__(self, type: Any, value: Any, traceback: Any):
        # Local cur_stream variable for type refinement
        cur_stream = self.stream
        # If stream is None or no XPU device available, return
        if cur_stream is None or self.idx == -1:
            return
        # Reset the stream on the original device
        # and destination device
        if self.src_prev_stream.device != cur_stream.device:
            set_stream(self.dst_prev_stream)
        set_stream(self.src_prev_stream)
def stream(stream: Optional["Stream"]) -> StreamContext:
    r"""Wrapper around the Context-manager StreamContext that
    selects a given stream.
    Arguments:
        stream (Stream): selected stream. This manager is a no-op if it's
            ``None``.
    .. note:: Streams are per-device. If the selected stream is not on the
        current device, this function will also change the current device to
        match the stream.
    """
    return StreamContext(stream)
def set_stream(stream: Stream):
    r"""Sets the current stream.This is a wrapper API to set the stream.
    Usage of this function is discouraged in favor of the ``stream``
    context manager.
    Args:
        stream (Stream): selected stream. This function is a no-op
            if this argument is ``None``.
    """
    if stream is None:
        return
    # The C layer identifies the stream by its underlying handle.
    intel_extension_for_pytorch._C._setCurrentStream(stream._cdata)
def current_stream(device: Optional[_device_t] = None) -> Stream:
    r"""Returns the currently selected :class:`Stream` for a given device.
    Arguments:
        device (torch.device or int, optional): selected device. Returns
            the currently selected :class:`Stream` for the current device, given
            by :func:`~torch.xpu.current_device`, if :attr:`device` is ``None``
            (default).
    """
    _lazy_init()
    # Wrap the raw C handle back into the Python Stream type.
    return Stream(
        _cdata=intel_extension_for_pytorch._C._getCurrentStream(
            _get_device_index(device, optional=True)
        )
    )
from torch.storage import _LegacyStorage
@staticmethod  # type: ignore[misc]
def _lazy_new(cls, *args, **kwargs):
    # Ensure the XPU runtime is up before any storage/tensor allocation.
    _lazy_init()
    # We may need to call lazy init again if we are a forked child
    # del _XPUBase.__new__
    return super(_XPUBase, cls).__new__(cls, *args, **kwargs)
class _XPUBase(object):
    # Mixin giving XPU storage types lazy construction and device-aware type().
    is_xpu = True
    is_sparse = False
    def type(self, *args, **kwargs):
        # We could use a Protocol here to tell mypy that self has `get_device` method
        # but it is only available in the typing module on Python >= 3.8
        # or on typing_extensions module on Python >= 3.6
        with device(self.get_device()):  # type: ignore[attr-defined]
            return super(_XPUBase, self).type(*args, **kwargs)  # type: ignore[misc]
    __new__ = _lazy_new
class _XPULegacyStorage(_LegacyStorage):
    """Legacy storage base for XPU; shared-memory constructors are unsupported."""
    @classmethod
    def from_buffer(cls, *args, **kwargs):
        raise RuntimeError("from_buffer: Not available for XPU storage")
    @classmethod
    def _new_with_weak_ptr(cls, *args, **kwargs):
        raise RuntimeError("_new_with_weak_ptr: Not available for XPU storage")
    @classmethod
    def _new_shared_filename(cls, manager, obj, size, *, device=None, dtype=None):
        raise RuntimeError("_new_shared_filename: Not available for XPU storage")
# Legacy typed-storage shims: each subclass only pins the dtype it represents.
class ByteStorage(_XPULegacyStorage):
    @classproperty
    def dtype(self):
        return torch.uint8
class DoubleStorage(_XPULegacyStorage):
    @classproperty
    def dtype(self):
        return torch.double
class FloatStorage(_XPULegacyStorage):
    @classproperty
    def dtype(self):
        return torch.float
class HalfStorage(_XPULegacyStorage):
    @classproperty
    def dtype(self):
        return torch.half
class LongStorage(_XPULegacyStorage):
    @classproperty
    def dtype(self):
        return torch.long
class IntStorage(_XPULegacyStorage):
    @classproperty
    def dtype(self):
        return torch.int
class ShortStorage(_XPULegacyStorage):
    @classproperty
    def dtype(self):
        return torch.short
class CharStorage(_XPULegacyStorage):
    @classproperty
    def dtype(self):
        return torch.int8
class BoolStorage(_XPULegacyStorage):
    @classproperty
    def dtype(self):
        return torch.bool
class BFloat16Storage(_XPULegacyStorage):
    @classproperty
    def dtype(self):
        return torch.bfloat16
class ComplexDoubleStorage(_XPULegacyStorage):
    @classproperty
    def dtype(self):
        return torch.cdouble
class ComplexFloatStorage(_XPULegacyStorage):
    @classproperty
    def dtype(self):
        return torch.cfloat
# The base classes were only needed to define the subclasses above.
del _LegacyStorage
del _XPULegacyStorage
# Register the XPU storages so torch (e.g. serialization) recognizes them.
torch._storage_classes.add(DoubleStorage)
torch._storage_classes.add(FloatStorage)
torch._storage_classes.add(LongStorage)
torch._storage_classes.add(IntStorage)
torch._storage_classes.add(ShortStorage)
torch._storage_classes.add(CharStorage)
torch._storage_classes.add(ByteStorage)
torch._storage_classes.add(HalfStorage)
torch._storage_classes.add(BoolStorage)
torch._storage_classes.add(BFloat16Storage)
torch._storage_classes.add(ComplexDoubleStorage)
torch._storage_classes.add(ComplexFloatStorage)
def _xpu_tag(obj):
if obj.device.type == "xpu":
return "xpu:" + str(obj.device.index)
def validate_xpu_device(location):
    """Validate a serialization location tag such as ``"xpu:0"``.

    Returns the parsed integer device index; raises RuntimeError when XPU is
    unavailable or the index is out of range.
    """
    device = _get_device_index(location, True)
    if not torch.xpu.is_available():
        raise RuntimeError(
            "Attempting to deserialize object on a xpu "
            "device but torch.xpu.is_available() is False. "
            "If you are running on a CPU-only machine, "
            "please use torch.load with map_location=torch.device('cpu') "
            "to map your storages to the CPU."
        )
    device_count = torch.xpu.device_count()
    if device >= device_count:
        raise RuntimeError(
            "Attempting to deserialize object on xpu device "
            f"{device} but torch.xpu.device_count() is {device_count}. Please use "
            "torch.load with map_location to map your storages "
            "to an existing device."
        )
    return device
# This module object, used below to register itself as torch's "xpu" backend.
current_module = sys.modules[__name__]
def _xpu(self, device=None, non_blocking=False, **kwargs):
    """Returns a copy of this object in xpu memory.
    If this object is already in xpu memory and on the correct device, then
    no copy is performed and the original object is returned.
    Args:
        device (int): The destination GPU id. Defaults to the current device.
        non_blocking (bool): If ``True`` and the source is in pinned memory,
            the copy will be asynchronous with respect to the host. Otherwise,
            the argument has no effect.
        **kwargs: For compatibility, may contain the key ``async`` in place of
            the ``non_blocking`` argument.
    """
    non_blocking = torch._utils._get_async_or_non_blocking("xpu", non_blocking, kwargs)
    # NOTE(review): the "already on the right device" fast path below is
    # disabled; every call currently copies. TODO confirm this is intended.
    # if self.is_xpu:
    #     if device is None:
    #         device = torch.xpu.current_device()
    #     if self.get_device() == device:
    #         return self
    # else:
    if device is None:
        # -1 makes torch.xpu.device a no-op, i.e. use the current device.
        device = -1
    with torch.xpu.device(device):
        if self.is_sparse:
            # Sparse storages are not handled; falls through and returns None.
            # new_type = getattr(torch.xpu.sparse, self.__class__.__name__)
            # indices = torch._indices(self).xpu(device, non_blocking)
            # values = torch._values(self).xpu(device, non_blocking)
            # return new_type(indices, values, self.size())
            pass
        else:
            untyped_storage = torch.UntypedStorage(
                self.size(), device=torch.device("xpu")
            )
            untyped_storage.copy_(self, non_blocking)
            return untyped_storage
def _xpu_deserialize(obj, location):
    """Restore a serialized storage onto the XPU device named by ``location``.

    Registered with ``torch.serialization`` below; ``location`` is a tag such
    as ``"xpu:0"`` produced by :func:`_xpu_tag`. Returns the restored storage,
    or ``None`` when ``location`` is not an XPU tag.
    """
    if location.startswith("xpu"):
        device_id = validate_xpu_device(location)
        if getattr(obj, "_torch_load_uninitialized", False):
            # Bug fix: this previously passed the module-level `device` class
            # to torch.xpu.device() instead of the validated integer index.
            with torch.xpu.device(device_id):
                return torch.UntypedStorage(obj.nbytes(), device=torch.device(location))
        else:
            return _xpu(obj, device=device_id)
def get_device_type() -> str:
    """Return the device-type string this backend registers with PyTorch."""
    backend_name = "xpu"
    return backend_name
# Expose .xpu() on storages and hook XPU (de)serialization into torch.load/save.
_StorageBase.xpu = _xpu
serialization.register_package(30, _xpu_tag, _xpu_deserialize)
# Register this module as torch's "xpu" device backend (torch.xpu.*).
torch._register_device_module("xpu", current_module)
# post initial
if hasattr(intel_extension_for_pytorch._C, "_postInitExtension"):
    intel_extension_for_pytorch._C._postInitExtension()
# class FloatTensor:
#     def __new__(cls, e):
#         return torch.tensor(e, device='xpu', dtype=torch.float)
# class DoubleTensor:
#     def __new__(cls, e):
#         return torch.tensor(e, device='xpu', dtype=torch.float64)
if intel_extension_for_pytorch._C._has_xpu():
    if is_available():
        # Monkey-patch torch internals with XPU-aware equivalents.
        override_get_stream()
        override_recursive_to()
        if not has_fp64_dtype():
            # Devices without fp64 support need printing/compare workarounds.
            override_tensor_totype()
        exec_path = sys.argv[0].split("/")
        if len(exec_path) > 0 and "pytest" in exec_path:
            override_assert_equal()
| 17,811 | 30.637655 | 92 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/xpu/_proxy_module.py | import torch
import intel_extension_for_pytorch._C
# utils function to define base object proxy
def _proxy_module(name: str) -> type:
def init_err(self):
class_name = self.__class__.__name__
raise RuntimeError(
"Tried to instantiate proxy base class {}.".format(class_name)
+ "\nIntel_extension_for_pytorch not compiled with XPU enabled."
)
return type(name, (object,), {"__init__": init_err})
def _register_proxy(module: str):
    # Install a raising placeholder under _C only if the real symbol is absent.
    if not hasattr(intel_extension_for_pytorch._C, module):
        intel_extension_for_pytorch._C.__dict__[module] = _proxy_module(module)
def _register_proxy_ops(module: str):
    # Same as _register_proxy but targets the torch.ops.torch_ipex namespace.
    if not hasattr(torch.ops.torch_ipex, module):
        torch.ops.torch_ipex.__dict__[module] = _proxy_module(module)
class proxy_math_mode(object):
    # Placeholder mirroring the C extension's FP32 math-mode enum values.
    FP32 = -1
    TF32 = -2
    BF32 = -3
class proxy_compute_eng(object):
    # Placeholder mirroring the C extension's XPU compute-engine enum values.
    RECOMMEND = -1
    BASIC = -2
    ONEDNN = -3
    ONEMKL = -4
    XETLA = -5
# Register placeholders for every symbol the Python layer expects from the
# C extension, so importing without XPU support does not raise AttributeError.
# --- [ CPU proxys:
_register_proxy_ops("interaction_forward")
if not hasattr(intel_extension_for_pytorch._C, "FP32MathMode"):
    intel_extension_for_pytorch._C.__dict__["FP32MathMode"] = proxy_math_mode
# --- [ XPU proxys:
_register_proxy("ShortStorageBase")
_register_proxy("CharStorageBase")
_register_proxy("IntStorageBase")
_register_proxy("LongStorageBase")
_register_proxy("BoolStorageBase")
_register_proxy("HalfStorageBase")
_register_proxy("DoubleStorageBase")
_register_proxy("FloatStorageBase")
_register_proxy("BFloat16StorageBase")
_register_proxy("QUInt8StorageBase")
_register_proxy("QInt8StorageBase")
_register_proxy("_XPUStreamBase")
_register_proxy("_XPUEventBase")
if not hasattr(intel_extension_for_pytorch._C, "XPUFP32MathMode"):
    intel_extension_for_pytorch._C.__dict__["XPUFP32MathMode"] = proxy_math_mode
if not hasattr(intel_extension_for_pytorch._C, "XPUComputeEng"):
    intel_extension_for_pytorch._C.__dict__["XPUComputeEng"] = proxy_compute_eng
| 1,971 | 26.774648 | 80 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/xpu/generator.py | # coding: utf-8
import torch
import intel_extension_for_pytorch
# This is a WA. We will submit a PR to stock-PyTorch and make XPU backend
# supported in torch.Generator() API.
class Generator(torch._C.Generator):
    """RNG generator that supports XPU devices via the C extension factory."""
    def __new__(cls, device=None):
        # Delegate construction entirely to the extension; the returned object
        # is the extension's generator, not an instance of this subclass.
        return intel_extension_for_pytorch._C.generator_new(device)
| 318 | 28 | 73 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/xpu/overrides.py | import torch
import intel_extension_for_pytorch # noqa F401
from functools import wraps
from torch.nn.parallel.scatter_gather import _is_namedtuple
def override_tensor_totype():
    r"""Override _tensor_totype to avoid triggering fp64 error when printing XPU tensor on ATS-M"""
    def fp64_tensor_totype_wrapper(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            # For XPU tensors, short-circuit the wrapped function entirely and
            # return a float32 copy of the first XPU tensor found (avoids the
            # implicit fp64 conversion torch's printer would perform).
            for arg in args:
                if torch.is_tensor(arg) and arg.is_xpu:
                    return arg.to(torch.float)
            for k, kwarg in kwargs.items():
                if torch.is_tensor(kwarg) and kwarg.is_xpu:
                    return kwarg.to(torch.float)
            # No XPU tensor involved: defer to the original implementation.
            return f(*args, **kwargs)
        return wrapper
    torch._tensor_str.tensor_totype = fp64_tensor_totype_wrapper(
        torch._tensor_str.tensor_totype
    )
def override_assert_equal():
    r"""Override assertEqual to avoid triggering fp64 error on tensor comparison in test case"""
    # NOTE(review): despite its name, this helper moves XPU tensors *to CPU*
    # (recursively through tuples/lists) so the comparison runs on host.
    def args_to_xpu(args):
        if torch.is_tensor(args) and args.is_xpu:
            return args.to("cpu")
        elif isinstance(args, (tuple, list)):
            args_list = list(args)
            for i, arg in enumerate(args_list):
                args_list[i] = args_to_xpu(arg)
            if isinstance(args, tuple):
                return tuple(args_list)
            else:
                return args_list
        else:
            return args
    def fp64_assert_equal_wrapper(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            # Only positional args are converted; kwargs pass through unchanged.
            args = args_to_xpu(args)
            return f(*args, **kwargs)
        return wrapper
    r"""
    In PyTorch design, `__allow_nonbracketed_mutation_flag` is a flag to forbid bare assignment
    to torch.backends.<cudnn|mkldnn>.enabled and friends when running test suite. This flag will
    be forced to set to False by function `disable_global_flags` which is defined in
    torch.testing._internal.common_utils when overriding TestCase.assertEqual. It may result in
    a runtime error on subsequent cudnn|mkldnn setting, if any. The function here is to override
    `disable_global_flags` with an empty one to keep the flag `__allow_nonbracketed_mutation_flag`
    from being changed.
    """
    def _disable_global_flags():
        pass
    torch.backends.disable_global_flags = _disable_global_flags
    from torch.testing._internal.common_utils import TestCase
    TestCase.assertEqual = fp64_assert_equal_wrapper(TestCase.assertEqual)
# background streams used for copying
_streams = None
def override_get_stream():
    r"""
    This function overrides `_get_stream` in PyTorch to provide XPU support.
    """
    def _get_stream(device: int):
        r"""
        Gets a background stream for copying between CPU and XPU.
        """
        global _streams
        if device == -1:
            return None
        if _streams is None:
            # One lazily-created background stream per visible XPU device.
            _streams = [None] * torch.xpu.device_count()
        if _streams[device] is None:
            _streams[device] = torch.xpu.Stream(device)
        return _streams[device]
    torch.nn.parallel._functions._get_stream = _get_stream
    # Also returned so callers (e.g. override_recursive_to) can reuse it.
    return _get_stream
def override_recursive_to():
    r"""
    This function overrides `_recursive_to` in PyTorch to provide XPU support for data movement.
    """
    def _recursive_to(inputs, target_gpu, use_side_stream_for_tensor_copies):
        r"""
        Recursively moves input to the target_gpu, used in XPU distributed training.
        """
        def to_map(obj):
            if isinstance(obj, torch.Tensor):
                if obj.device == torch.device("xpu", target_gpu):
                    # Already on the target device: no copy needed.
                    return (obj,)
                if not use_side_stream_for_tensor_copies:
                    return (obj.to(target_gpu),)
                else:
                    # Perform CPU -> GPU copies in a background stream. This code is
                    # motivated from similar logic in torch/nn/parallel/_functions.py
                    _get_stream = override_get_stream()
                    stream = _get_stream(target_gpu)
                    with torch.xpu.stream(stream):
                        output = obj.to(target_gpu)
                    # synchronize with the copy stream
                    with torch.xpu.device(target_gpu):
                        current_stream = torch.xpu.current_stream()
                        # Sync the current stream with the copy stream
                        current_stream.wait_stream(stream)
                        # Ensure tensor memory is not reused until work on
                        # main stream is complete
                        output.record_stream(current_stream)
                    return (output,)
            if _is_namedtuple(obj):
                return [type(obj)(*args) for args in zip(*map(to_map, obj))]
            if isinstance(obj, tuple) and len(obj) > 0:
                return list(zip(*map(to_map, obj)))
            if isinstance(obj, list) and len(obj) > 0:
                return [list(i) for i in zip(*map(to_map, obj))]
            if isinstance(obj, dict) and len(obj) > 0:
                return [type(obj)(i) for i in zip(*map(to_map, obj.items()))]
            return [obj]
        # Avoid reference cycle
        try:
            res = to_map(inputs)
        finally:
            to_map = None  # type: ignore[assignment]
        return res
    torch.distributed.utils._recursive_to = _recursive_to
class WrapAPI:
    """Wrappers that rewrite the dtype of tensor-creating/moving torch APIs.

    Configured by :func:`convert_default_dtype` below: whenever a call would
    produce ``user_defined_src_dtype``, it is substituted by
    ``user_defined_dst_dtype`` (optionally only for XPU devices).
    """
    # Class-level configuration, set by convert_default_dtype().
    user_defined_src_dtype = None
    user_defined_dst_dtype = None
    only_device = False
    @classmethod
    def wrap_api_to(cls, api):
        # Wraps torch.Tensor.to / .xpu: normalizes the many positional call
        # forms into explicit dtype=/device= kwargs, then swaps the dtype.
        @wraps(api)
        def new_api(*args, **kwargs):
            new_args = list(args)
            assert isinstance(
                args[0], torch.Tensor
            ), "torch.Tensor.to wrapper got non-Tensor for the 1st argument"
            src_dtype = args[0].dtype
            src_device = args[0].device
            dst_dtype = kwargs.get("dtype")
            dst_device = kwargs.get("device")
            if dst_dtype is None and dst_device is None:
                if len(args) > 1 and isinstance(
                    args[1], torch.Tensor
                ):  # torch.Tensor.to(other, ...)
                    kwargs["dtype"] = args[1].dtype
                    kwargs["device"] = args[1].device
                    new_args.pop(1)
                elif len(args) > 1 and isinstance(
                    args[1], torch.dtype
                ):  # torch.Tensor.to(dtype, ...)
                    kwargs["dtype"] = args[1]
                    kwargs["device"] = src_device
                    new_args.pop(1)
                elif len(args) > 2 and isinstance(
                    args[2], torch.dtype
                ):  # torch.Tensor.to(device, dtype, ...)
                    kwargs["dtype"] = args[2]
                    kwargs["device"] = args[1]
                    new_args.pop(2)
                    new_args.pop(1)
                elif len(args) > 1:  # torch.Tensor.to(device, ...)
                    kwargs["dtype"] = src_dtype
                    kwargs["device"] = args[1]
                    new_args.pop(1)
            elif dst_device is None:
                if len(args) > 1:  # torch.Tensor.to(device, dtype=dtype, ...)
                    kwargs["device"] = args[1]
                    new_args.pop(1)
                else:  # torch.Tensor.to(dtype=dtype, ...)
                    kwargs["device"] = src_device
            elif dst_dtype is None:  # torch.Tensor.to(device=device, ...)
                kwargs["dtype"] = src_dtype
            else:  # torch.Tensor.to(device=device, dtype=dtype, ...)
                pass
            new_args = tuple(new_args)
            # NOTE(review): t.to() with neither positional args nor dtype/device
            # kwargs would leave kwargs["device"] unset and raise KeyError here
            # — TODO confirm that call form cannot reach this wrapper.
            if cls.only_device and "xpu" not in str(kwargs["device"]):
                pass
            elif kwargs["dtype"] == cls.user_defined_src_dtype:
                kwargs["dtype"] = cls.user_defined_dst_dtype
            return api(*new_args, **kwargs)
        return new_api
    @classmethod
    def wrap_api_create_size(cls, api):
        # Wraps size-based factories (torch.empty, torch.ones, ...): swaps the
        # requested dtype when it matches the configured source dtype.
        @wraps(api)
        def new_api(*args, **kwargs):
            new_args = list(args)
            dst_dtype = kwargs.get("dtype")
            dst_device = kwargs.get("device")
            if cls.only_device and "xpu" not in str(dst_device):
                return api(*args, **kwargs)
            if dst_dtype == cls.user_defined_src_dtype:
                kwargs["dtype"] = cls.user_defined_dst_dtype
            new_args = tuple(new_args)
            return api(*new_args, **kwargs)
        return new_api
    @classmethod
    def wrap_api_create_tensor(cls, api):
        # Wraps *_like factories: the effective dtype/device default to those
        # of the prototype tensor unless overridden by kwargs.
        @wraps(api)
        def new_api(*args, **kwargs):
            new_args = list(args)
            assert len(args) > 0 and isinstance(
                args[0], torch.Tensor
            ), f"Current api {api} got non-Tensor for the 1st arguement"
            dst_device = args[0].device
            dst_dtype = args[0].dtype
            resign_dtype = kwargs.get("dtype")
            resign_dev = kwargs.get("device")
            dst_device = resign_dev if resign_dev is not None else dst_device
            dst_dtype = resign_dtype if resign_dtype is not None else dst_dtype
            if cls.only_device and "xpu" not in str(dst_device):
                return api(*args, **kwargs)
            if dst_dtype == cls.user_defined_src_dtype:
                kwargs["dtype"] = cls.user_defined_dst_dtype
            new_args = tuple(new_args)
            return api(*new_args, **kwargs)
        return new_api
def convert_default_dtype(src_dtype, dst_dtype, only_device=False):
    """Globally remap ``src_dtype`` to ``dst_dtype`` in torch factory/move APIs.

    With ``only_device=True`` the substitution applies only to XPU targets.
    Monkey-patches torch in place; there is no un-patch mechanism.
    """
    WrapAPI.user_defined_src_dtype = src_dtype
    WrapAPI.user_defined_dst_dtype = dst_dtype
    WrapAPI.only_device = only_device
    # hack to
    # The apis implicitly included by torch.to include:
    torch.Tensor.to = WrapAPI.wrap_api_to(torch.Tensor.to)
    torch.Tensor.xpu = WrapAPI.wrap_api_to(torch.Tensor.xpu)
    # hack create size
    torch.tensor = WrapAPI.wrap_api_create_size(torch.tensor)
    torch.scalar_tensor = WrapAPI.wrap_api_create_size(torch.scalar_tensor)
    torch.empty_quantized = WrapAPI.wrap_api_create_size(torch.empty_quantized)
    torch.empty = WrapAPI.wrap_api_create_size(torch.empty)
    torch.ones = WrapAPI.wrap_api_create_size(torch.ones)
    torch.randint = WrapAPI.wrap_api_create_size(torch.randint)
    torch.zeros = WrapAPI.wrap_api_create_size(torch.zeros)
    torch.randn = WrapAPI.wrap_api_create_size(torch.randn)
    torch.rand = WrapAPI.wrap_api_create_size(torch.rand)
    torch.full = WrapAPI.wrap_api_create_size(torch.full)
    torch.arange = WrapAPI.wrap_api_create_size(torch.arange)
    torch.range = WrapAPI.wrap_api_create_size(torch.range)
    torch.logspace = WrapAPI.wrap_api_create_size(torch.logspace)
    torch.randperm = WrapAPI.wrap_api_create_size(torch.randperm)
    torch.linspace = WrapAPI.wrap_api_create_size(torch.linspace)
    torch.kaiser_window = WrapAPI.wrap_api_create_size(torch.kaiser_window)
    torch.hamming_window = WrapAPI.wrap_api_create_size(torch.hamming_window)
    torch.blackman_window = WrapAPI.wrap_api_create_size(torch.blackman_window)
    torch.hann_window = WrapAPI.wrap_api_create_size(torch.hann_window)
    torch.bartlett_window = WrapAPI.wrap_api_create_size(torch.bartlett_window)
    torch.tril_indices = WrapAPI.wrap_api_create_size(torch.tril_indices)
    torch.eye = WrapAPI.wrap_api_create_size(torch.eye)
    torch.empty_strided = WrapAPI.wrap_api_create_size(torch.empty_strided)
    torch.triu_indices = WrapAPI.wrap_api_create_size(torch.triu_indices)
    # hack create from other tensor
    torch.zeros_like = WrapAPI.wrap_api_create_tensor(torch.zeros_like)
    torch.ones_like = WrapAPI.wrap_api_create_tensor(torch.ones_like)
    torch.randn_like = WrapAPI.wrap_api_create_tensor(torch.randn_like)
    torch.rand_like = WrapAPI.wrap_api_create_tensor(torch.rand_like)
    torch.empty_like = WrapAPI.wrap_api_create_tensor(torch.empty_like)
    torch.full_like = WrapAPI.wrap_api_create_tensor(torch.full_like)
    torch.randint_like = WrapAPI.wrap_api_create_tensor(torch.randint_like)
    torch.asarray = WrapAPI.wrap_api_create_tensor(torch.asarray)
    torch.sparse_coo_tensor = WrapAPI.wrap_api_create_tensor(torch.sparse_coo_tensor)
| 12,238 | 40.208754 | 99 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/xpu/amp/autocast_mode.py | import torch
class autocast(torch.amp.autocast_mode.autocast):
    r"""
    See :class:`torch.autocast`.
    ``torch.xpu.amp.autocast(args...)`` is equivalent to ``torch.autocast("xpu", args...)``
    """
    def __init__(self, enabled=True, dtype=torch.bfloat16, cache_enabled=True):
        # Only the device_type is pinned; everything else defers to torch.amp.
        super().__init__(
            "xpu", enabled=enabled, dtype=dtype, cache_enabled=cache_enabled
        )
| 401 | 27.714286 | 91 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/xpu/amp/__init__.py | from .autocast_mode import autocast
import intel_extension_for_pytorch
# Thin wrappers over the C extension's XPU autocast state.
def get_autocast_xpu_dtype():
    return intel_extension_for_pytorch._C.get_autocast_xpu_dtype()
def is_autocast_xpu_enabled():
    return intel_extension_for_pytorch._C.is_autocast_xpu_enabled()
def set_autocast_xpu_enabled(enabled):
    return intel_extension_for_pytorch._C.set_autocast_xpu_enabled(enabled)
def set_autocast_xpu_dtype(dtype):
    return intel_extension_for_pytorch._C.set_autocast_xpu_dtype(dtype)
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/xpu/intrinsic/__init__.py | import torch
from torch.nn.modules.utils import _pair
from torch import nn, Tensor
from torch.jit.annotations import BroadcastingList2
from typing import List, Union
from .modules import Interaction
import intel_extension_for_pytorch
__all__ = [
"Interaction",
"nms",
"locations_to_boxes",
"roi_align",
]
# Thin wrappers dispatching to the torch_ipex custom ops.
def MulAdd(input, other, accumu, alpha=1.0):
    # Fused input * other + alpha * accumu.
    return torch.ops.torch_ipex.mul_add(input, other, accumu, alpha)
def nms(dets, scores, iou_threshold):
    return torch.ops.torch_ipex.nms(dets, scores, iou_threshold)
def locations_to_boxes(locations, priors, center_variance, size_variance):
    return torch.ops.torch_ipex.locations_to_boxes(
        locations, priors, center_variance, size_variance
    )
def check_roi_boxes_shape(boxes: Union[Tensor, List[Tensor]]):
    """Validate the layout of ``boxes`` for ROI ops.

    A plain tensor must be ``[K, 5]`` (image index + 4 coordinates); a
    list/tuple must contain ``[L, 4]`` tensors, one entry per image.
    """
    if isinstance(boxes, torch.Tensor):
        torch._assert(
            boxes.size(1) == 5, "The boxes tensor shape is not correct as Tensor[K, 5]"
        )
    elif isinstance(boxes, (list, tuple)):
        for per_image_boxes in boxes:
            torch._assert(
                per_image_boxes.size(1) == 4,
                "The shape of the tensor in the boxes list is not correct as List[Tensor[L, 4]]",
            )
    else:
        torch._assert(
            False, "boxes is expected to be a Tensor[L, 5] or a List[Tensor[K, 4]]"
        )
    return
def convert_boxes_to_roi_format(boxes: List[Tensor]) -> Tensor:
    """Convert a per-image list of ``[L_i, 4]`` boxes into a ``[sum(L_i), 5]`` ROI tensor.

    The extra first column carries the index of the image each box belongs to,
    matching the layout expected by :func:`roi_align`.
    """
    # Bug fix: the original called ``_cat``, a torchvision-internal helper that
    # is never defined or imported in this module (NameError at runtime);
    # torch.cat is the intended operation here.
    concat_boxes = torch.cat(list(boxes), dim=0)
    # Per-box image ids, shaped [L_i, 1] so they concatenate column-wise below.
    ids = torch.cat(
        [torch.full_like(b[:, :1], i) for i, b in enumerate(boxes)], dim=0
    )
    return torch.cat([ids, concat_boxes], dim=1)
def roi_align(
    input: Tensor,
    boxes: Union[Tensor, List[Tensor]],
    output_size: BroadcastingList2[int],
    spatial_scale: float = 1.0,
    sampling_ratio: int = -1,
    aligned: bool = False,
) -> Tensor:
    """ROI Align dispatched to the torch_ipex custom op.

    ``boxes`` may be a ``[K, 5]`` tensor (image index + coords) or a per-image
    list of ``[L, 4]`` tensors, which is converted to the ROI format first.
    """
    check_roi_boxes_shape(boxes)
    rois = boxes
    # output_size may be a single int; normalize to (h, w).
    output_size = _pair(output_size)
    if not isinstance(rois, torch.Tensor):
        rois = convert_boxes_to_roi_format(rois)
    return torch.ops.torch_ipex.roi_align(
        input,
        rois,
        spatial_scale,
        output_size[0],
        output_size[1],
        sampling_ratio,
        aligned,
    )
| 2,239 | 26.654321 | 97 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/xpu/intrinsic/modules/intrinsic.py | import torch
import intel_extension_for_pytorch # noqa F401
from torch.autograd import Function
class InteractionFuncion(Function):
    """Autograd wrapper around the torch_ipex interaction op (forward only here)."""
    @staticmethod
    def forward(ctx, input_mlp, input_emb):
        return torch.ops.torch_ipex.interaction(input_mlp, input_emb)
# Public functional entry point.
Interaction = InteractionFuncion.apply
| 308 | 22.769231 | 69 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/optim/_lars.py | import torch
from typing import Iterable
from torch import nn
"""
We recommend using create_optimizer_lars and setting bn_bias_separately=True
instead of using class Lars directly, which helps LARS skip parameters
in BatchNormalization and bias, and has better performance in general.
Polynomial Warmup learning rate decay is also helpful for better performance in general.
"""
def create_optimizer_lars(
    model, lr, momentum, weight_decay, bn_bias_separately, epsilon
):
    """Build a :class:`Lars` optimizer for ``model``.

    When ``bn_bias_separately`` is true, normalization parameters and biases
    get their own parameter groups with LARS scaling disabled (and, for norm
    layers, weight decay zeroed), which generally trains better.
    """
    if not bn_bias_separately:
        return Lars(
            model.parameters(),
            lr=lr,
            momentum=momentum,
            weight_decay=weight_decay,
            epsilon=epsilon,
        )
    param_groups = [
        {
            "params": get_common_parameters(
                model, exclude_func=get_norm_bias_parameters
            )
        },
        {"params": get_norm_parameters(model), "weight_decay": 0, "lars": False},
        {
            "params": get_bias_parameters(model, exclude_func=get_norm_parameters),
            "lars": False,
        },
    ]
    return Lars(
        param_groups,
        lr=lr,
        momentum=momentum,
        weight_decay=weight_decay,
        epsilon=epsilon,
    )
class Lars(torch.optim.Optimizer):
    r"""Implements the LARS optimizer from `"Large batch training of convolutional networks"
    <https://arxiv.org/pdf/1708.03888.pdf>`_.
    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate
        momentum (float, optional): momentum factor (default: 0)
        eeta (float, optional): LARS coefficient as used in the paper (default: 1e-3)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        epsilon (float, optional): term added to the trust-ratio denominator
            for numerical stability (default: 0)
    """
    def __init__(
        self,
        params: Iterable[torch.nn.Parameter],
        lr=1e-3,
        momentum=0,
        eeta=1e-3,
        weight_decay=0,
        epsilon=0.0,
    ) -> None:
        if not isinstance(lr, float) or lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        if eeta <= 0 or eeta > 1:
            raise ValueError("Invalid eeta value: {}".format(eeta))
        if epsilon < 0:
            raise ValueError("Invalid epsilon value: {}".format(epsilon))
        defaults = dict(
            lr=lr,
            momentum=momentum,
            weight_decay=weight_decay,
            eeta=eeta,
            epsilon=epsilon,
            lars=True,  # per-group flag; groups may opt out of LARS scaling
        )
        super().__init__(params, defaults)
    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        for group in self.param_groups:
            weight_decay = group["weight_decay"]
            momentum = group["momentum"]
            eeta = group["eeta"]
            lr = group["lr"]
            lars = group["lars"]
            eps = group["epsilon"]
            for p in group["params"]:
                if p.grad is None:
                    continue
                decayed_grad = p.grad
                scaled_lr = lr
                if lars:
                    w_norm = torch.norm(p)
                    g_norm = torch.norm(p.grad)
                    # Trust ratio scales the lr per parameter (LARS eq. 6);
                    # falls back to 1.0 when either norm is zero. Fixed: use
                    # elementwise `&` on the 0-dim bool tensors instead of the
                    # Python `and`, which relied on implicit bool() conversion.
                    trust_ratio = torch.where(
                        (w_norm > 0) & (g_norm > 0),
                        eeta * w_norm / (g_norm + weight_decay * w_norm + eps),
                        torch.ones_like(w_norm),
                    )
                    scaled_lr *= trust_ratio.item()
                if weight_decay != 0:
                    decayed_grad = decayed_grad.add(p, alpha=weight_decay)
                if momentum != 0:
                    param_state = self.state[p]
                    if "momentum_buffer" not in param_state:
                        buf = param_state["momentum_buffer"] = torch.clone(
                            decayed_grad
                        ).detach()
                    else:
                        buf = param_state["momentum_buffer"]
                    # Note: applied to a freshly created buffer as well, so the
                    # first step uses (1 + momentum) * grad — kept as-is to
                    # preserve the original update rule.
                    buf.mul_(momentum).add_(decayed_grad)
                    decayed_grad = buf
                p.add_(decayed_grad, alpha=-scaled_lr)
        return loss
"""
Functions which help to skip bias and BatchNorm
"""
BN_CLS = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)
def get_parameters_from_cls(module, cls_):
    """Yield every parameter directly owned by submodules of type ``cls_``."""
    def members_of(m):
        # Non-matching modules contribute no members.
        return m._parameters.items() if isinstance(m, cls_) else {}

    for _name, parameter in module._named_members(get_members_fn=members_of):
        yield parameter
def get_norm_parameters(module):
    # Parameters of LayerNorm and BatchNorm1d/2d/3d submodules.
    return get_parameters_from_cls(module, (nn.LayerNorm, *BN_CLS))
def get_bias_parameters(module, exclude_func=None):
    """Yield bias parameters of ``module``, minus any yielded by ``exclude_func``."""
    excluded = set() if exclude_func is None else set(exclude_func(module))
    for name, parameter in module.named_parameters():
        if "bias" in name and parameter not in excluded:
            yield parameter
def get_norm_bias_parameters(module):
    """Yield normalization-layer parameters, then the remaining bias parameters."""
    yield from get_norm_parameters(module)
    yield from get_bias_parameters(module, exclude_func=get_norm_parameters)
def get_common_parameters(module, exclude_func=None):
    """Yield all parameters of ``module`` except any yielded by ``exclude_func``."""
    excluded = set() if exclude_func is None else set(exclude_func(module))
    for _name, parameter in module.named_parameters():
        if parameter not in excluded:
            yield parameter
| 6,374 | 32.552632 | 92 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/optim/_optimizer_utils.py | import torch
import copy
import types
import warnings
from copy import deepcopy
from itertools import chain
from collections import defaultdict
from ._functional import (
sgd_step,
adagrad_step,
lamb_step,
adam_step,
adamw_step,
lars_step,
)
from ._lamb import Lamb
from ._lars import Lars
from ..nn import utils
# Optimizer types that have an IPEX fused step implementation per device type.
IPEX_FUSED_OPTIMIZER_LIST_CPU = [
    torch.optim.SGD,
    torch.optim.Adagrad,
    torch.optim.Adam,
    Lamb,
    Lars,
]
IPEX_FUSED_OPTIMIZER_LIST_XPU = [
    torch.optim.SGD,
    torch.optim.AdamW,
    torch.optim.Adam,
    Lamb,
]
# Maps optimizer type -> fused functional step used to replace Optimizer.step.
OPTIMIZER_FUSED_STEP_MAPPING_CPU = {
    torch.optim.SGD: sgd_step,
    torch.optim.Adagrad: adagrad_step,
    torch.optim.Adam: adam_step,
    Lamb: lamb_step,
    Lars: lars_step,
}
OPTIMIZER_FUSED_STEP_MAPPING_XPU = {
    torch.optim.SGD: sgd_step,
    torch.optim.AdamW: adamw_step,
    torch.optim.Adam: adam_step,
    Lamb: lamb_step,
    Lars: lars_step,
}
def patch_zero_grad_for_master_weight_training(optimizer):
    r"""
    Patch "zero_grad" method of optimizer to support BFloat16 master weight training
    Under master weight training case, the grad is actually on 'bf16_params' or 'fp16_params'.
    So the 'zero_grad' should work on the 'bf16_params' or 'fp16_params' too.
    """
    def zero_grad(self, set_to_none: bool = True):
        # Clear grads held by the low-precision model copies; params_attr maps
        # each (master) parameter to an attr object whose `.parameter` is the
        # bf16/fp16 copy (or None when there is no copy).
        for _, v in self.params_attr.items():
            _param = v.parameter
            if _param is None:
                continue
            if _param.grad is not None:
                if set_to_none:
                    _param.grad = None
                else:
                    # Same in-place zeroing protocol as torch.optim.Optimizer.zero_grad:
                    # detach from the autograd graph before zeroing.
                    if _param.grad.grad_fn is not None:
                        _param.grad.detach_()
                    else:
                        _param.grad.requires_grad_(False)
                    _param.grad.zero_()
        # Also run the stock zero_grad for the optimizer's own parameters.
        self._original_zero_grad(set_to_none)
    # Save the original method once so repeated patching stays idempotent.
    if not hasattr(optimizer, "_original_zero_grad"):
        setattr(optimizer, "_original_zero_grad", optimizer.zero_grad)  # noqa: B010
    optimizer.zero_grad = types.MethodType(zero_grad, optimizer)
def patch_step_for_master_weight_training(optimizer):
    r"""
    Patch "step" method of optimizer to support master weight training
    1.Convert BF16 or FP16 grad to FP32
    2.Call original "step" to update parameters
    3.Sync FP32 master weight back to BF16 or FP16 weight
    """
    def master_param_non_fused_step(self, closure=None):
        # convert bf16 or fp16 weight's grad to float.
        # k is the fp32 master weight; v.parameter is the low-precision model
        # copy. `_param is k` marks entries that have no separate copy.
        for k, v in self.params_attr.items():
            _param = v.parameter
            if _param is None or _param is k:
                continue
            if _param.requires_grad and _param.grad is not None:
                k.grad = _param.grad.detach().float()
        loss = self._original_step(closure)
        # sync master weight back to the model's low-precision parameter
        for k, v in self.params_attr.items():
            _param = v.parameter
            if _param is None or _param is k:
                continue
            if k.device.type == "cpu":
                if _param.dtype == torch.bfloat16:
                    torch.ops.torch_ipex.sync_master_weight_to_bf16(k, _param)
                else:
                    assert _param.dtype == torch.float16
                    torch.ops.torch_ipex.sync_master_weight_to_fp16(k, _param)
            elif k.device.type == "xpu":
                _param.data = k.data.to(dtype=torch.bfloat16)
            else:
                pass
        return loss
    # Split master_param_non_fused_step into 2 steps:
    # 1.Sync_grad: Convert grad to FP32
    # 2.step_sync_weight: Call original "step" to update parameters and
    # Sync FP32 master weight back to weight
    # This is because gradscaler will unscale grad and
    # it needs to sync grad to the FP32's grad first. After that gradscaler
    # will update weight and it also needs to sync FP32 master weight back to weight.
    def sync_grad(self):
        for k, v in self.params_attr.items():
            _param = v.parameter
            if _param is None or _param is k:
                continue
            assert (
                _param.dtype != torch.bfloat16
            ), "GradScaler is not recommended for bf16 training"
            # NOTE(review): unlike master_param_non_fused_step above, there is
            # no `_param.grad is not None` guard here -- confirm callers only
            # invoke sync_grad after backward has populated grads.
            if _param.requires_grad:
                k.grad = _param.grad.detach().float()
    def step_sync_weight(self, closure=None):
        loss = self._original_step(closure)
        # sync master weight back to the model's fp16 parameter
        for k, v in self.params_attr.items():
            _param = v.parameter
            if _param is None or _param is k:
                continue
            assert (
                _param.dtype != torch.bfloat16
            ), "GradScaler is not recommended for bf16 training"
            torch.ops.torch_ipex.sync_master_weight_to_fp16(k, _param)
        return loss
    # Save the original step once so repeated patching stays idempotent.
    if not hasattr(optimizer, "_original_step"):
        setattr(optimizer, "_original_step", optimizer.step)  # noqa: B010
    optimizer.step = types.MethodType(master_param_non_fused_step, optimizer)
    optimizer.sync_grad = types.MethodType(sync_grad, optimizer)
    optimizer.step_sync_weight = types.MethodType(step_sync_weight, optimizer)
def pack_state(state, state_key, state_value, attr):
    """Pack one optimizer-state tensor into the prepacked layout of its parameter.

    Packing only happens when the parameter belongs to exactly one module whose
    class is in the IPEX CPU prepack list.  For conv modules the state tensor is
    first moved to the module's memory format when the weight is channels-last.
    The packed tensor is written back into ``state[state_key]``.
    """
    if attr.num_modules != 1:
        return
    module_cls = list(attr.modules_cls)[0]
    if module_cls not in utils._parameter_wrapper.IPEX_WEIGHT_PREPACK_MODULE_CPU():
        return
    if module_cls in (torch.nn.Conv1d, torch.nn.Conv2d, torch.nn.Conv3d):
        # Pick the memory format matching the conv dimensionality.
        if module_cls is torch.nn.Conv2d:
            memory_format = torch.channels_last
        elif module_cls is torch.nn.Conv3d:
            memory_format = torch.channels_last_3d
        else:
            memory_format = torch.contiguous_format
        if attr.weight_channels_last:
            state_value = state_value.to(memory_format=memory_format)
    state[state_key] = attr.op_ctx.pack(state_value)
def patch_load_state_dict(optimizer):
    r"""
    Re-pack parameter state after load state_dict
    """
    def repack(self):
        # After loading plain-format states, convert every state tensor that is
        # shaped like its (prepacked) parameter back into the packed layout.
        for group in self.param_groups:
            for _, p in enumerate(group["params"]):
                if p in self.params_attr and p.device.type == "cpu":
                    attr = self.params_attr[p]
                    if attr.op_ctx is not None:
                        # weight attr need "op" info to pack state while bias attr not
                        state = self.state[p]
                        plain_format_shape = attr.plain_format_shape
                        for state_key, state_value in state.items():
                            if (
                                isinstance(state_value, torch.Tensor)
                                and state_value.size() == plain_format_shape
                            ):
                                # We have an assumption here that any tensor's in parameter state, if they
                                # have same shapes with the parameter, they should share same layout with
                                # the parameter. Thus we need pack the state as we did to parameters.
                                pack_state(state, state_key, state_value, attr)
    def original_load_state_dict_without_state_cast(self, state_dict):
        r"""Loads the optimizer state.
        Args:
            state_dict (dict): optimizer state. Should be an object returned
                from a call to :meth:`state_dict`.
        Copied from torch/optim/optimizer.py.
        We need copy it here and change the behavior of state cast.
        For example, in our bf16 training the momentum buffer should always
        be float, but the original load_state_dict for optimizer will cast it to
        bfloat16 which will lose accuracy
        The original code:
        def cast(param, value, key=None):
            if isinstance(value, torch.Tensor):
                # Floating-point types are a bit special here. They are the only ones
                # that are assumed to always match the type of params.
                # Make sure state['step'] is not casted https://github.com/pytorch/pytorch/issues/74424
                if (key != "step"):
                    if param.is_floating_point():
                        value = value.to(param.dtype)
                    value = value.to(param.device)
                return value
            elif isinstance(value, dict):
                return {k: cast(param, v, key=k) for k, v in value.items()}
            elif isinstance(value, container_abcs.Iterable):
                return type(value)(cast(param, v) for v in value)
            else:
                return value
        state = defaultdict(dict)
        for k, v in state_dict['state'].items():
            if k in id_map:
                param = id_map[k]
                state[param] = cast(param, v)
            else:
                state[k] = v
        We change it to:
        state = defaultdict(dict)
        for k, v in state_dict['state'].items():
            if k in id_map:
                param = id_map[k]
                state[param] = v
            else:
                state[k] = v
        """
        # deepcopy, to be consistent with module API
        state_dict = deepcopy(state_dict)
        # Validate the state_dict
        groups = self.param_groups
        saved_groups = state_dict["param_groups"]
        if len(groups) != len(saved_groups):
            raise ValueError(
                "loaded state dict has a different number of " "parameter groups"
            )
        param_lens = (len(g["params"]) for g in groups)
        saved_lens = (len(g["params"]) for g in saved_groups)
        if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)):
            raise ValueError(
                "loaded state dict contains a parameter group "
                "that doesn't match the size of optimizer's group"
            )
        # Update the state
        id_map = {
            old_id: p
            for old_id, p in zip(
                chain.from_iterable((g["params"] for g in saved_groups)),
                chain.from_iterable((g["params"] for g in groups)),
            )
        }
        # Copy state assigned to params as-is (deliberately NO dtype/device cast
        # here -- see the docstring above). State that is not assigned to params
        # is copied as is (needed for backward compatibility).
        state = defaultdict(dict)
        for k, v in state_dict["state"].items():
            if k in id_map:
                param = id_map[k]
                state[param] = v
            else:
                state[k] = v
        # Update parameter groups, setting their 'params' value
        def update_group(group, new_group):
            new_group["params"] = group["params"]
            return new_group
        param_groups = [update_group(g, ng) for g, ng in zip(groups, saved_groups)]
        self.__setstate__({"state": state, "param_groups": param_groups})
    def load_state_dict(self, state_dict):
        # Load without dtype casting, then re-pack states for prepacked params.
        original_load_state_dict_without_state_cast(self, state_dict)
        repack(self)
    # Save the original method once so repeated patching stays idempotent.
    if not hasattr(optimizer, "_original_load_state_dict"):
        setattr(  # noqa: B010
            optimizer, "_original_load_state_dict", optimizer.load_state_dict
        )
    optimizer.load_state_dict = types.MethodType(load_state_dict, optimizer)
def pack_optimizer_states(optimizer, param, attr):
    """Convert the optimizer states attached to *param* into packed format.

    No-op when *optimizer* is None or when it holds no state for *param*.
    """
    if optimizer is None:
        return
    if param not in optimizer.state:
        return
    state = optimizer.state[param]
    expected_shape = attr.plain_format_shape
    for key, value in state.items():
        # States shaped exactly like the (plain-format) parameter are assumed
        # to share its layout, so they must be packed the same way.
        if isinstance(value, torch.Tensor) and value.size() == expected_shape:
            pack_state(state, key, value, attr)
def patch_state_dict(optimizer):
    r"""
    To support resume training.
    Patch "state_dict" method to return unpacked/FP32 parameters/states
    """
    def get_optimizer_unpacked_state_dict(self):
        opt = self
        # Deep-copy so the live optimizer's packed states stay untouched.
        opt_temp = copy.deepcopy(opt)
        # Walk the original and the copy in lockstep; any state tensor shaped
        # like its (packed) parameter is converted back to the public layout.
        for (k1, _), (_, v2) in zip(opt.state.items(), opt_temp.state.items()):
            if k1 in opt.params_attr:
                attr = opt.params_attr[k1]
                for state_key, state_value in v2.items():
                    if (
                        isinstance(state_value, torch.Tensor)
                        and state_value.shape == k1.shape
                    ):
                        # We have an assumption here that any tensor's in parameter state, if they
                        # have same shapes with the parameter, they should share same layout with
                        # the parameter. Thus we need unpack the state as we did to parameters.
                        if attr.op_ctx is not None:
                            assert attr.num_modules == 1
                            state_value = attr.op_ctx.to_public(state_value)
                        v2[state_key] = state_value
        return opt_temp.state_dict()
    # Save the original method once so repeated patching stays idempotent.
    if not hasattr(optimizer, "_original_state_dict"):
        setattr(optimizer, "_original_state_dict", optimizer.state_dict)  # noqa: B010
    optimizer.state_dict = types.MethodType(
        get_optimizer_unpacked_state_dict, optimizer
    )
def optimizer_fusion(optimizer, master_weight_split, device_type):
    r"""
    Patch "step" method to choose IPEX optimized fused update kernel.

    Args:
        optimizer: the user's optimizer instance to patch in place.
        master_weight_split: kept for interface compatibility; not used in
            this function.
        device_type (str): "cpu" or "xpu"; any other value leaves the
            optimizer unpatched with a warning.

    Returns:
        The same optimizer instance (patched when a fused step exists for
        its type, otherwise unchanged apart from ``params_attr``).
    """
    if not hasattr(optimizer, "params_attr"):
        setattr(optimizer, "params_attr", {})  # noqa: B010
    try:
        # KeyError here means no fused kernel for this optimizer type.
        if device_type == "cpu":
            step = OPTIMIZER_FUSED_STEP_MAPPING_CPU[type(optimizer)]
        elif device_type == "xpu":
            step = OPTIMIZER_FUSED_STEP_MAPPING_XPU[type(optimizer)]
        else:
            warnings.warn(
                "IPEX does not support device type "
                + str(device_type)
                + ". For now, only support CPU, XPU."
            )
            return optimizer
        # Save the original step once so repeated patching stays idempotent.
        if not hasattr(optimizer, "_original_step"):
            setattr(optimizer, "_original_step", optimizer.step)  # noqa: B010
        optimizer.step = types.MethodType(step, optimizer)
        setattr(optimizer, "fused", True)  # noqa: B010
    except KeyError:
        # Fixed typo in the user-facing warning ("suport" -> "support").
        warnings.warn(
            "Does not support fused step for "
            + str(type(optimizer))
            + ", will use non-fused step"
        )
    return optimizer
| 15,121 | 37.675192 | 107 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/optim/_lamb.py | import torch
from ._functional import _lamb_impl
class Lamb(torch.optim.Optimizer):
    r"""Implements Lamb algorithm.
    It has been proposed in `Large Batch Optimization for Deep Learning:
    Training BERT in 76 minutes`_.
    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        fused (boolean, optional): whether to use fused kernel to accelerate
            (default: False)
    .. _Large Batch Optimization for Deep Learning: Training BERT in 76 minutes:
        https://arxiv.org/abs/1904.00962
    """
    def __init__(
        self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, fused=False
    ):
        # Validate hyper-parameters before handing them to the base class.
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(
            lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, fused=fused
        )
        super(Lamb, self).__init__(params, defaults)
        # params_attr / fused are consumed by the IPEX optimizer patching
        # utilities (master-weight bookkeeping and fused-kernel selection).
        self.params_attr = {}
        self.fused = fused
    def __setstate__(self, state):
        super(Lamb, self).__setstate__(state)
    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.
        Args:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        for group in self.param_groups:
            params_with_grad = []
            grads = []
            exp_avgs = []
            exp_avg_sqs = []
            state_steps = []
            for p in group["params"]:
                grad = p.grad
                if grad is not None:
                    params_with_grad.append(p)
                    if grad.is_sparse:
                        raise RuntimeError("Lamb does not support sparse gradients")
                    grads.append(grad)
                    state = self.state[p]
                    # Lazy state initialization
                    if len(state) == 0:
                        state["step"] = 0
                        # Moment buffers stay in float (or float64 for fp64
                        # params) so low-precision params don't lose accuracy.
                        buffer_dtype = (
                            p.dtype if p.dtype is torch.float64 else torch.float
                        )
                        state["exp_avg"] = torch.zeros(
                            p.shape, dtype=buffer_dtype, device=p.device
                        )
                        state["exp_avg_sq"] = torch.zeros(
                            p.shape, dtype=buffer_dtype, device=p.device
                        )
                    exp_avgs.append(state["exp_avg"])
                    exp_avg_sqs.append(state["exp_avg_sq"])
                    # update the steps for each param group update
                    state["step"] += 1
                    # record the step after step update
                    state_steps.append(state["step"])
            beta1, beta2 = group["betas"]
            _lamb_impl(
                params_with_grad,
                grads,
                exp_avgs,
                exp_avg_sqs,
                state_steps,
                beta1,
                beta2,
                group["lr"],
                group["weight_decay"],
                group["eps"],
            )
        return loss
| 4,230 | 37.117117 | 88 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/optim/_functional.py | r"""Functional interface, port from torch/optim/_function.py"""
import torch
from torch import Tensor
from typing import List, Optional
def is_master_weight(param, params_attr):
    """Return True when *param* is an fp32 master weight with a bf16 model copy.

    *params_attr* maps parameters to attribute objects whose ``parameter``
    field holds the low-precision copy (or None).
    """
    if param not in params_attr:
        return False
    low_precision_copy = params_attr[param].parameter
    if low_precision_copy is None:
        return False
    return param.dtype == torch.float and low_precision_copy.dtype == torch.bfloat16
def get_bf16_grad(param, params_attr):
    """Return the gradient held by the bf16 model copy of master weight *param*."""
    # Only valid for master weights; the usable grad lives on the bf16 copy.
    assert is_master_weight(param, params_attr)
    low_precision_copy = params_attr[param].parameter
    return low_precision_copy.grad
def get_param2(param, params_attr):
    """Return the companion tensor the fused kernels need for *param*.

    - pure fp32 training: an empty tensor (no companion exists)
    - master-weight-split: the trail half of the fp32 weight
    - master weight: the bf16 model copy of the fp32 weight
    """
    attr = params_attr.get(param)
    if attr is not None:
        if attr.parameter_trail is not None:
            # Split case: param itself is the bf16 top half.
            assert param.dtype is torch.bfloat16
            return attr.parameter_trail
        if is_master_weight(param, params_attr):
            return attr.parameter
    return torch.Tensor()
def _make_sparse(grad, grad_indices, values):
size = grad.size()
if grad_indices.numel() == 0 or values.numel() == 0:
return torch.empty_like(grad)
return torch.sparse_coo_tensor(grad_indices, values, size)
def _single_tensor_adagrad(
    params: List[Tensor],
    params2: List[Tensor],
    grads: List[Tensor],
    state_sums: List[Tensor],
    state_steps: List[Tensor],
    *,
    lr: float,
    weight_decay: float,
    lr_decay: float,
    eps: float,
    has_sparse_grad: bool,
    maximize: bool,
    fused: bool
):
    """Per-parameter Adagrad update.

    Dense, non-complex parameters take the IPEX fused CPU kernel (which also
    keeps params2's low-precision copy in sync); sparse and complex cases fall
    back to the reference implementation from torch.optim.Adagrad.
    Only change vs. the original: removed the unused local `size`.
    """
    for param, param2, grad, state_sum, step_t in zip(
        params, params2, grads, state_sums, state_steps
    ):
        # update step
        step_t += 1
        step = step_t.item()
        grad = grad if not maximize else -grad
        if not (grad.is_sparse or torch.is_complex(param)):
            torch.ops.torch_ipex.adagrad_fused_step(
                param, grad, state_sum, param2, step, lr, weight_decay, lr_decay, eps
            )
            continue
        if weight_decay != 0:
            if grad.is_sparse:
                raise RuntimeError(
                    "weight_decay option is not compatible with sparse gradients"
                )
            grad = grad.add(param, alpha=weight_decay)
        grad = grad.to(param.dtype)
        # Decayed learning rate for this step.
        clr = lr / (1 + (step - 1) * lr_decay)
        if grad.is_sparse:
            grad = grad.coalesce()  # the update is non-linear so indices must be unique
            grad_indices = grad._indices()
            grad_values = grad._values()
            # Accumulate squared grads only at the sparse positions.
            state_sum.add_(_make_sparse(grad, grad_indices, grad_values.pow(2)))
            std = state_sum.sparse_mask(grad)
            std_values = std._values().sqrt_().add_(eps)
            param.add_(
                _make_sparse(grad, grad_indices, grad_values / std_values), alpha=-clr
            )
        else:
            is_complex = torch.is_complex(param)
            if is_complex:
                # Operate on the real view; in-place updates alias the
                # original complex tensors.
                grad = torch.view_as_real(grad)
                state_sum = torch.view_as_real(state_sum)
                param = torch.view_as_real(param)
            state_sum.addcmul_(grad, grad, value=1)
            std = state_sum.sqrt().add_(eps)
            param.addcdiv_(grad, std, value=-clr)
            if is_complex:
                param = torch.view_as_complex(param)
                state_sum = torch.view_as_complex(state_sum)
# keep this function here if enable fused_foreach_adagrad_later
def _multi_tensor_adagrad(
    params: List[Tensor],
    params2: List[Tensor],
    grads: List[Tensor],
    state_sums: List[Tensor],
    state_steps: List[Tensor],
    *,
    lr: float,
    weight_decay: float,
    lr_decay: float,
    eps: float,
    has_sparse_grad: bool,
    maximize: bool,
    fused: bool
):
    """Foreach-style entry point; currently delegates to the single-tensor path."""
    # Foreach functions will throw errors if given empty lists
    if not params:
        return
    # Fold the maximize flag into the grads so the delegate runs with maximize=False.
    effective_grads = torch._foreach_neg(grads) if maximize else grads
    _single_tensor_adagrad(
        params,
        params2,
        effective_grads,
        state_sums,
        state_steps,
        lr=lr,
        weight_decay=weight_decay,
        lr_decay=lr_decay,
        eps=eps,
        has_sparse_grad=has_sparse_grad,
        maximize=False,
        fused=fused,
    )
    return
def adagrad(
    params: List[Tensor],
    params2: List[Tensor],
    grads: List[Tensor],
    state_sums: List[Tensor],
    state_steps: List[Tensor],
    # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
    # setting these as kwargs for now as functional API is compiled by torch/distributed/optim
    has_sparse_grad: bool = None,
    foreach: bool = None,
    *,
    lr: float,
    weight_decay: float,
    lr_decay: float,
    eps: float,
    maximize: bool,
    fused: bool
):
    r"""Functional API that performs Adagrad algorithm computation.
    See :class:`~torch.optim.Adagrad` for details.
    """
    # New-style torch.optim keeps per-param steps as singleton tensors.
    if not all(isinstance(t, torch.Tensor) for t in state_steps):
        raise RuntimeError(
            "API has changed, `state_steps` argument must contain a list of singleton tensors"
        )
    if foreach is None:
        # Placeholder for more complex foreach logic to be added when value is not set
        foreach = False
    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")
    use_foreach = foreach and not torch.jit.is_scripting()
    func = _multi_tensor_adagrad if use_foreach else _single_tensor_adagrad
    func(
        params,
        params2,
        grads,
        state_sums,
        state_steps,
        lr=lr,
        weight_decay=weight_decay,
        lr_decay=lr_decay,
        eps=eps,
        has_sparse_grad=has_sparse_grad,
        maximize=maximize,
        fused=fused,
    )
@torch.no_grad()
def adagrad_step(self, closure=None):
    """Performs a single optimization step.
    Args:
        closure (callable, optional): A closure that reevaluates the model
            and returns the loss.
    """
    # Patched in as Optimizer.step by optimizer_fusion for torch.optim.Adagrad.
    loss = None
    if closure is not None:
        with torch.enable_grad():
            loss = closure()
    for group in self.param_groups:
        params_with_grad = []
        params2 = []
        grads = []
        state_sums = []
        state_steps = []
        has_sparse_grad = False
        for p in group["params"]:
            # For master-weight training the usable grad lives on the bf16 copy.
            grad = (
                get_bf16_grad(p, self.params_attr)
                if is_master_weight(p, self.params_attr)
                else p.grad
            )
            if grad is not None:
                if grad.is_sparse:
                    has_sparse_grad = True
                params_with_grad.append(p)
                grads.append(grad)
                state = self.state[p]
                state_sums.append(state["sum"])
                state_steps.append(state["step"])
                # Companion tensor (bf16 copy or trail) for the fused kernel.
                param2 = get_param2(p, self.params_attr)
                params2.append(param2)
        adagrad(
            params_with_grad,
            params2,
            grads,
            state_sums,
            state_steps,
            lr=group["lr"],
            weight_decay=group["weight_decay"],
            lr_decay=group["lr_decay"],
            eps=group["eps"],
            has_sparse_grad=has_sparse_grad,
            foreach=group["foreach"],
            maximize=group["maximize"],
            fused=self.fused,
        )
    return loss
def _sgd_non_fused_micro_step(
param: Tensor,
grad: Tensor,
momentum_buffer: Optional[Tensor],
momentum: float,
lr: float,
weight_decay: float,
dampening: float,
nesterov: bool,
):
if weight_decay != 0:
grad = grad.add(param, alpha=weight_decay)
if momentum != 0:
buf = momentum_buffer
if buf is None:
buf = torch.clone(grad).detach()
momentum_buffer = buf
else:
buf.mul_(momentum).add_(grad, alpha=1 - dampening)
if nesterov:
grad = grad.add(buf, alpha=momentum)
else:
grad = buf
param.add_(grad, alpha=-lr)
return momentum_buffer
def _single_tensor_sgd(
    params: List[Tensor],
    params2: List[Tensor],
    grads: List[Tensor],
    momentum_buffer_list: List[Optional[Tensor]],
    *,
    weight_decay: float,
    momentum: float,
    lr: float,
    dampening: float,
    nesterov: bool,
    maximize: bool,
    has_sparse_grad: bool,
    fused: bool
):
    # Per-parameter SGD update. Dense grads go through the IPEX fused kernel;
    # sparse grads either use packed_add (bf16 split case) or a plain Python
    # fallback (_sgd_non_fused_micro_step).
    for i, param in enumerate(params):
        grad = grads[i] if not maximize else -grads[i]
        if not grad.is_sparse:
            # Fused kernel updates param, momentum buffer and the companion
            # low-precision tensor params2[i] in one call.
            momentum_buffer_list[i] = torch.ops.torch_ipex.sgd_fused_step(
                param,
                grad,
                momentum_buffer_list[i],
                params2[i],
                momentum,
                lr,
                weight_decay,
                dampening,
                nesterov,
            )
            continue
        if (
            param.dtype == torch.bfloat16
            and grad.is_sparse
            and grad.dtype == torch.bfloat16
            and weight_decay == 0
            and momentum == 0
        ):
            # packed_add can support sparse tensor
            torch.ops.torch_ipex.packed_add(param, params2[i], grad, -lr)
        else:
            # no special optimize for other non fused case, fall back to naive implementation
            grad = grad.to(param.dtype)
            momentum_buffer_list[i] = _sgd_non_fused_micro_step(
                param,
                grad,
                momentum_buffer_list[i],
                momentum,
                lr,
                weight_decay,
                dampening,
                nesterov,
            )
def _single_tensor_lars(
    params: List[Tensor],
    params2: List[Tensor],
    grads: List[Tensor],
    momentum_buffer_list: List[Optional[Tensor]],
    *,
    eeta: float,
    eps: float,
    weight_decay: float,
    momentum: float,
    lr: float,
    dampening: float,
    nesterov: bool,
    maximize: bool,
    has_sparse_grad: bool,
    fused: bool
):
    # Per-parameter LARS update via the IPEX fused kernel; maximize is
    # implemented by negating the learning rate.
    if maximize:
        lr = -lr
    for i, param in enumerate(params):
        # if not grads[i].is_sparse:
        momentum_buffer_list[i] = torch.ops.torch_ipex.lars_fused_step(
            param,
            grads[i],
            momentum_buffer_list[i],
            params2[i],
            momentum,
            lr,
            eeta,
            eps,
            weight_decay,
            dampening,
            nesterov,
        )
        # continue
# keep this function here if enable fused_foreach_sgd_later
def _multi_tensor_sgd(
    params: List[Tensor],
    params2: List[Tensor],
    grads: List[Tensor],
    momentum_buffer_list: List[Optional[Tensor]],
    *,
    weight_decay: float,
    momentum: float,
    lr: float,
    dampening: float,
    nesterov: bool,
    maximize: bool,
    has_sparse_grad: bool,
    fused: bool
):
    """Foreach-style entry point; currently delegates to the single-tensor path."""
    if not params:
        return
    _single_tensor_sgd(
        params,
        params2,
        grads,
        momentum_buffer_list,
        weight_decay=weight_decay,
        momentum=momentum,
        lr=lr,
        dampening=dampening,
        nesterov=nesterov,
        maximize=maximize,
        has_sparse_grad=has_sparse_grad,
        fused=fused,
    )
def sgd(
    params: List[Tensor],
    params2: List[Tensor],
    d_p_list: List[Tensor],
    momentum_buffer_list: List[Optional[Tensor]],
    # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
    # setting this as kwarg for now as functional API is compiled by torch/distributed/optim
    has_sparse_grad: bool = None,
    foreach: bool = None,
    *,
    weight_decay: float,
    momentum: float,
    lr: float,
    dampening: float,
    nesterov: bool,
    maximize: bool,
    fused: bool
):
    r"""Functional API that performs SGD algorithm computation.
    See :class:`~torch.optim.SGD` for details.
    """
    if foreach is None:
        # Placeholder for more complex foreach logic to be added when value is not set
        foreach = False
    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")
    use_foreach = foreach and not torch.jit.is_scripting()
    func = _multi_tensor_sgd if use_foreach else _single_tensor_sgd
    func(
        params,
        params2,
        d_p_list,
        momentum_buffer_list,
        weight_decay=weight_decay,
        momentum=momentum,
        lr=lr,
        dampening=dampening,
        nesterov=nesterov,
        has_sparse_grad=has_sparse_grad,
        maximize=maximize,
        fused=fused,
    )
@torch.no_grad()
def sgd_step(self, closure=None):
    """Performs a single optimization step.
    Args:
        closure (callable, optional): A closure that reevaluates the model
            and returns the loss.
    """
    # Patched in as Optimizer.step by optimizer_fusion for torch.optim.SGD.
    loss = None
    if closure is not None:
        with torch.enable_grad():
            loss = closure()
    for group in self.param_groups:
        params_with_grad = []
        params2 = []
        d_p_list = []
        momentum_buffer_list = []
        has_sparse_grad = False
        for p in group["params"]:
            # For master-weight training the usable grad lives on the bf16 copy.
            grad = (
                get_bf16_grad(p, self.params_attr)
                if is_master_weight(p, self.params_attr)
                else p.grad
            )
            if grad is not None:
                params_with_grad.append(p)
                d_p_list.append(grad)
                if grad.is_sparse:
                    has_sparse_grad = True
                state = self.state[p]
                if "momentum_buffer" not in state:
                    momentum_buffer_list.append(None)
                else:
                    momentum_buffer_list.append(state["momentum_buffer"])
                # Companion tensor (bf16 copy or trail) for the fused kernel.
                param2 = get_param2(p, self.params_attr)
                params2.append(param2)
        sgd(
            params_with_grad,
            params2,
            d_p_list,
            momentum_buffer_list,
            weight_decay=group["weight_decay"],
            momentum=group["momentum"],
            lr=group["lr"],
            dampening=group["dampening"],
            nesterov=group["nesterov"],
            maximize=group["maximize"],
            has_sparse_grad=has_sparse_grad,
            foreach=group["foreach"],
            fused=self.fused,
        )
        # update momentum_buffers in state
        for p, momentum_buffer in zip(params_with_grad, momentum_buffer_list):
            state = self.state[p]
            state["momentum_buffer"] = momentum_buffer
    return loss
def lars(
    params: List[Tensor],
    params2: List[Tensor],
    d_p_list: List[Tensor],
    momentum_buffer_list: List[Optional[Tensor]],
    # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
    # setting this as kwarg for now as functional API is compiled by torch/distributed/optim
    has_sparse_grad: bool = None,
    foreach: bool = None,
    *,
    eeta: float,
    eps: float,
    weight_decay: float,
    momentum: float,
    lr: float,
    dampening: float,
    nesterov: bool,
    maximize: bool,
    fused: bool
):
    r"""Functional API that performs LARS algorithm computation.
    dampening = 0
    nesterov = False
    maximize = False
    """
    if foreach is None:
        # Placeholder for more complex foreach logic to be added when value is not set
        foreach = False
    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")
    # Only a single-tensor implementation exists for LARS today.
    _single_tensor_lars(
        params,
        params2,
        d_p_list,
        momentum_buffer_list,
        eeta=eeta,
        eps=eps,
        weight_decay=weight_decay,
        momentum=momentum,
        lr=lr,
        dampening=dampening,
        nesterov=nesterov,
        has_sparse_grad=has_sparse_grad,
        maximize=maximize,
        fused=fused,
    )
@torch.no_grad()
def lars_step(self, closure=None):
    """Performs a single optimization step.
    Args:
        closure (callable, optional): A closure that reevaluates the model
            and returns the loss.
    """
    # Patched in as Optimizer.step by optimizer_fusion for the Lars optimizer.
    # Groups with group["lars"] falsy fall back to plain SGD semantics.
    loss = None
    if closure is not None:
        with torch.enable_grad():
            loss = closure()
    for group in self.param_groups:
        params_with_grad = []
        params2 = []
        d_p_list = []
        momentum_buffer_list = []
        has_sparse_grad = False
        for p in group["params"]:
            # For master-weight training the usable grad lives on the bf16 copy.
            grad = (
                get_bf16_grad(p, self.params_attr)
                if is_master_weight(p, self.params_attr)
                else p.grad
            )
            if grad is not None:
                params_with_grad.append(p)
                d_p_list.append(grad)
                if grad.is_sparse:
                    has_sparse_grad = True
                state = self.state[p]
                if "momentum_buffer" not in state:
                    momentum_buffer_list.append(None)
                else:
                    momentum_buffer_list.append(state["momentum_buffer"])
                # Companion tensor (bf16 copy or trail) for the fused kernel.
                param2 = get_param2(p, self.params_attr)
                params2.append(param2)
        if group["lars"]:
            lars(
                params_with_grad,
                params2,
                d_p_list,
                momentum_buffer_list,
                eeta=group["eeta"],
                eps=group["epsilon"],
                weight_decay=group["weight_decay"],
                momentum=group["momentum"],
                lr=group["lr"],
                dampening=0,
                nesterov=0,
                maximize=0,
                has_sparse_grad=has_sparse_grad,
                foreach=None,
                fused=self.fused,
            )
        else:
            sgd(
                params_with_grad,
                params2,
                d_p_list,
                momentum_buffer_list,
                weight_decay=group["weight_decay"],
                momentum=group["momentum"],
                lr=group["lr"],
                dampening=0,
                nesterov=0,
                maximize=0,
                has_sparse_grad=has_sparse_grad,
                foreach=None,
                fused=self.fused,
            )
        # update momentum_buffers in state
        for p, momentum_buffer in zip(params_with_grad, momentum_buffer_list):
            state = self.state[p]
            state["momentum_buffer"] = momentum_buffer
    return loss
def _lamb_fused_impl(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    attr: dict,
    state_steps: List[int],
    beta1: float,
    beta2: float,
    lr: float,
    weight_decay: float,
    eps: float,
):
    r"""Functional API that performs Lamb algorithm computation.
    See :class:`~torch.optim.Lamb` for details.
    """
    # One fused-kernel call per parameter; the kernel updates param and both
    # moment buffers in place.
    for i, param in enumerate(params):
        grad = grads[i]
        exp_avg = exp_avgs[i]
        exp_avg_sq = exp_avg_sqs[i]
        step = state_steps[i]
        # Companion tensor (bf16 copy or trail) kept in sync by the kernel.
        param2 = get_param2(param, attr)
        torch.ops.torch_ipex.lamb_fused_step(
            param,
            exp_avg,
            exp_avg_sq,
            grad,
            param2,
            step,
            beta1,
            beta2,
            lr,
            weight_decay,
            eps,
        )
def _lamb_impl(
params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
state_steps: List[int],
beta1: float,
beta2: float,
lr: float,
weight_decay: float,
eps: float,
):
r"""Functional API that performs Lamb algorithm computation."""
for i, param in enumerate(params):
grad = grads[i]
exp_avg = exp_avgs[i]
exp_avg_sq = exp_avg_sqs[i]
step = state_steps[i]
bias_correction1 = 1 - beta1**step
bias_correction2 = 1 - beta2**step
grad = grad.to(exp_avg.dtype)
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
adam_step = (exp_avg / bias_correction1) / (
(exp_avg_sq / bias_correction2).sqrt() + eps
)
if weight_decay != 0:
adam_step.add_(param, alpha=weight_decay)
weight_norm = param.norm(p=2)
rtw_norm = adam_step.norm(p=2)
true_ratio = weight_norm / rtw_norm
param.add_(adam_step, alpha=-lr * true_ratio)
@torch.no_grad()
def lamb_step(self, closure=None):
    """Performs a single optimization step.
    Args:
        closure (callable, optional): A closure that reevaluates the model
            and returns the loss.
    """
    # Patched in as Optimizer.step by optimizer_fusion for the Lamb optimizer.
    # Only change vs. the original: removed the unused local `trails`.
    loss = None
    if closure is not None:
        with torch.enable_grad():
            loss = closure()
    for group in self.param_groups:
        params_with_grad = []
        grads = []
        exp_avgs = []
        exp_avg_sqs = []
        state_steps = []
        for p in group["params"]:
            # For master-weight training the usable grad lives on the bf16 copy.
            grad = (
                get_bf16_grad(p, self.params_attr)
                if is_master_weight(p, self.params_attr)
                else p.grad
            )
            if grad is not None:
                params_with_grad.append(p)
                if grad.is_sparse:
                    raise RuntimeError("Lamb does not support sparse gradients")
                grads.append(grad)
                state = self.state[p]
                # Lazy state initialization
                if len(state) == 0:
                    state["step"] = 0
                    # Moments stay in float (or float64) for accuracy.
                    buffer_dtype = p.dtype if p.dtype is torch.float64 else torch.float
                    state["exp_avg"] = torch.zeros(
                        p.shape, dtype=buffer_dtype, device=p.device
                    )
                    state["exp_avg_sq"] = torch.zeros(
                        p.shape, dtype=buffer_dtype, device=p.device
                    )
                exp_avgs.append(state["exp_avg"])
                exp_avg_sqs.append(state["exp_avg_sq"])
                # update the steps for each param group update
                state["step"] += 1
                # record the step after step update
                state_steps.append(state["step"])
        beta1, beta2 = group["betas"]
        _lamb_fused_impl(
            params_with_grad,
            grads,
            exp_avgs,
            exp_avg_sqs,
            self.params_attr,
            state_steps,
            beta1,
            beta2,
            group["lr"],
            group["weight_decay"],
            group["eps"],
        )
    return loss
@torch.no_grad()
def adam_step(self, closure=None):
    """Performs a single optimization step.
    Args:
        closure (callable, optional): A closure that reevaluates the model
            and returns the loss.
    """
    # Patched in as Optimizer.step by optimizer_fusion for torch.optim.Adam.
    loss = None
    if closure is not None:
        with torch.enable_grad():
            loss = closure()
    for group in self.param_groups:
        params_with_grad = []
        params2 = []
        grads = []
        exp_avgs = []
        exp_avg_sqs = []
        max_exp_avg_sqs = []
        state_steps = []
        beta1, beta2 = group["betas"]
        for p in group["params"]:
            # For master-weight training the usable grad lives on the bf16 copy.
            grad = (
                get_bf16_grad(p, self.params_attr)
                if is_master_weight(p, self.params_attr)
                else p.grad
            )
            if grad is not None:
                params_with_grad.append(p)
                if grad.is_sparse:
                    raise RuntimeError(
                        "Adam does not support sparse gradients, please consider SparseAdam instead"
                    )
                grads.append(grad)
                state = self.state[p]
                # Lazy state initialization
                if len(state) == 0:
                    # Moments stay in float (or float64) for accuracy.
                    buffer_dtype = p.dtype if p.dtype is torch.float64 else torch.float
                    state["step"] = torch.tensor(0.0)
                    # Exponential moving average of gradient values
                    state["exp_avg"] = torch.zeros_like(
                        p, memory_format=torch.preserve_format, dtype=buffer_dtype
                    )
                    # Exponential moving average of squared gradient values
                    state["exp_avg_sq"] = torch.zeros_like(
                        p, memory_format=torch.preserve_format, dtype=buffer_dtype
                    )
                    if group["amsgrad"]:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state["max_exp_avg_sq"] = torch.zeros_like(
                            p, memory_format=torch.preserve_format, dtype=buffer_dtype
                        )
                exp_avgs.append(state["exp_avg"])
                exp_avg_sqs.append(state["exp_avg_sq"])
                if group["amsgrad"]:
                    max_exp_avg_sqs.append(state["max_exp_avg_sq"])
                state_steps.append(state["step"])
                # Companion tensor (bf16 copy or trail) for the fused kernel.
                param2 = get_param2(p, self.params_attr)
                params2.append(param2)
        adam(
            params_with_grad,
            params2,
            grads,
            exp_avgs,
            exp_avg_sqs,
            max_exp_avg_sqs,
            state_steps,
            amsgrad=group["amsgrad"],
            beta1=beta1,
            beta2=beta2,
            lr=group["lr"],
            weight_decay=group["weight_decay"],
            eps=group["eps"],
            maximize=group["maximize"],
            foreach=group["foreach"],
        )
    return loss
def adam(
    params: List[Tensor],
    params2: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    max_exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
    # setting this as kwarg for now as functional API is compiled by torch/distributed/optim
    foreach: bool = None,
    *,
    amsgrad: bool,
    beta1: float,
    beta2: float,
    lr: float,
    weight_decay: float,
    eps: float,
    maximize: bool
):
    r"""Functional API that performs Adam algorithm computation.
    See :class:`~torch.optim.Adam` for details.
    """
    # Every step counter must be a singleton tensor (newer optimizer API).
    for step in state_steps:
        if not isinstance(step, torch.Tensor):
            raise RuntimeError(
                "API has changed, `state_steps` argument must contain a list of singleton tensors"
            )
    # An unset foreach flag currently defaults to the single-tensor path.
    use_foreach = bool(foreach)
    if use_foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")
    impl = _multi_tensor_adam if use_foreach else _single_tensor_adam
    impl(
        params,
        params2,
        grads,
        exp_avgs,
        exp_avg_sqs,
        max_exp_avg_sqs,
        state_steps,
        amsgrad=amsgrad,
        beta1=beta1,
        beta2=beta2,
        lr=lr,
        weight_decay=weight_decay,
        eps=eps,
        maximize=maximize,
    )
def _single_tensor_adam(
    params: List[Tensor],
    params2: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    max_exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    *,
    amsgrad: bool,
    beta1: float,
    beta2: float,
    lr: float,
    weight_decay: float,
    eps: float,
    maximize: bool
):
    """Apply one fused Adam update per parameter via the ipex kernel."""
    for idx in range(len(params)):
        # Negate the gradient when maximizing the objective.
        effective_grad = -grads[idx] if maximize else grads[idx]
        # The fused kernel always expects a tensor argument for the max
        # moving average, even when amsgrad is off.
        max_avg_sq = max_exp_avg_sqs[idx] if amsgrad else torch.Tensor()
        step_t = state_steps[idx]
        # Advance the per-parameter step counter in place.
        step_t += 1
        torch.ops.torch_ipex.adam_fused_step(
            params[idx],
            exp_avgs[idx],
            exp_avg_sqs[idx],
            max_avg_sq,
            effective_grad,
            params2[idx],
            amsgrad,
            step_t.item(),
            beta1,
            beta2,
            lr,
            weight_decay,
            eps,
        )
def _multi_tensor_adam(
    params: List[Tensor],
    params2: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    max_exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    *,
    amsgrad: bool,
    beta1: float,
    beta2: float,
    lr: float,
    weight_decay: float,
    eps: float,
    maximize: bool
):
    """Multi-tensor path: only the gradient negation is batched (one foreach
    call); the update itself delegates to the single-tensor implementation."""
    if not params:
        return
    effective_grads = (
        torch._foreach_neg(tuple(grads)) if maximize else grads  # type: ignore[assignment]
    )
    _single_tensor_adam(
        params,
        params2,
        effective_grads,
        exp_avgs,
        exp_avg_sqs,
        max_exp_avg_sqs,
        state_steps,
        amsgrad=amsgrad,
        beta1=beta1,
        beta2=beta2,
        lr=lr,
        weight_decay=weight_decay,
        eps=eps,
        maximize=False,
    )
def adamw(
    params: List[Tensor],
    params2: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    max_exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
    # setting this as kwarg for now as functional API is compiled by torch/distributed/optim
    foreach: bool = None,
    *,
    amsgrad: bool,
    beta1: float,
    beta2: float,
    lr: float,
    weight_decay: float,
    eps: float,
    maximize: bool
):
    r"""Functional API that performs AdamW algorithm computation.
    See :class:`~torch.optim.AdamW` for details.
    """
    # Every step counter must be a singleton tensor (newer optimizer API).
    for step in state_steps:
        if not isinstance(step, torch.Tensor):
            raise RuntimeError(
                "API has changed, `state_steps` argument must contain a list of singleton tensors"
            )
    # No real foreach kernels yet, so an unset flag falls back to the
    # single-tensor path.
    use_foreach = bool(foreach)
    if use_foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")
    impl = _multi_tensor_adamw if use_foreach else _single_tensor_adamw
    impl(
        params,
        params2,
        grads,
        exp_avgs,
        exp_avg_sqs,
        max_exp_avg_sqs,
        state_steps,
        amsgrad=amsgrad,
        beta1=beta1,
        beta2=beta2,
        lr=lr,
        weight_decay=weight_decay,
        eps=eps,
        maximize=maximize,
    )
def _single_tensor_adamw(
    params: List[Tensor],
    params2: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    max_exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    *,
    amsgrad: bool,
    beta1: float,
    beta2: float,
    lr: float,
    weight_decay: float,
    eps: float,
    maximize: bool
):
    """Apply one fused AdamW update per parameter via the ipex kernel."""
    for idx in range(len(params)):
        # Negate the gradient when maximizing the objective.
        effective_grad = -grads[idx] if maximize else grads[idx]
        # The fused kernel always expects a tensor argument for the max
        # moving average, even when amsgrad is off.
        max_avg_sq = max_exp_avg_sqs[idx] if amsgrad else torch.Tensor()
        step_t = state_steps[idx]
        # Advance the per-parameter step counter in place.
        step_t += 1
        torch.ops.torch_ipex.adamw_fused_step(
            params[idx],
            exp_avgs[idx],
            exp_avg_sqs[idx],
            max_avg_sq,
            effective_grad,
            params2[idx],
            amsgrad,
            step_t.item(),
            beta1,
            beta2,
            lr,
            weight_decay,
            eps,
        )
def _multi_tensor_adamw(
    params: List[Tensor],
    params2: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    max_exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    *,
    amsgrad: bool,
    beta1: float,
    beta2: float,
    lr: float,
    weight_decay: float,
    eps: float,
    maximize: bool
):
    """Multi-tensor path: only the gradient negation is batched (one foreach
    call); the update itself delegates to the single-tensor implementation."""
    if not params:
        return
    effective_grads = (
        torch._foreach_neg(tuple(grads)) if maximize else grads  # type: ignore[assignment]
    )
    _single_tensor_adamw(
        params,
        params2,
        effective_grads,
        exp_avgs,
        exp_avg_sqs,
        max_exp_avg_sqs,
        state_steps,
        amsgrad=amsgrad,
        beta1=beta1,
        beta2=beta2,
        lr=lr,
        weight_decay=weight_decay,
        eps=eps,
        maximize=False,
    )
@torch.no_grad()
def adamw_step(self, closure=None):
    """Performs a single optimization step.
    Args:
        closure (callable, optional): A closure that reevaluates the model
            and returns the loss.
    Returns:
        The loss returned by ``closure`` if one was supplied, else ``None``.
    """
    loss = None
    if closure is not None:
        # Grad was disabled by the @torch.no_grad() decorator; re-enable it
        # only while the closure recomputes the loss.
        with torch.enable_grad():
            loss = closure()
    for group in self.param_groups:
        # fp32 master weight and fp32 weight(some layer no need cast)
        params_with_grad = []
        # bf16 weight(mapped to fp32 master weight) and empty tensor(empty means no need casted layer's weight)
        params2 = []
        grads = []
        exp_avgs = []
        exp_avg_sqs = []
        max_exp_avg_sqs = []
        state_steps = []
        beta1, beta2 = group["betas"]
        for p in group["params"]:
            # params_attr: {'layer.master_weight(fp32)': {'bf16_param': 'layer.weight(bf16)'}}
            grad = (
                get_bf16_grad(p, self.params_attr)
                if is_master_weight(p, self.params_attr)
                else p.grad
            )
            if grad is not None:
                params_with_grad.append(p)
                if grad.is_sparse:
                    raise RuntimeError(
                        "Adam does not support sparse gradients, please consider SparseAdam instead"
                    )
                grads.append(grad)
                state = self.state[p]
                # Lazy state initialization
                if len(state) == 0:
                    buffer_dtype = p.dtype
                    # AdamW here expects all trainable parameters to already be
                    # fp32 (low-precision weights use the master-weight scheme).
                    if p.dtype is not torch.float:
                        raise RuntimeError(
                            "parameter in optimizer(Adamw) is not FP32, need check"
                        )
                    state["step"] = torch.tensor(0.0)
                    # Exponential moving average of gradient values
                    state["exp_avg"] = torch.zeros_like(
                        p, memory_format=torch.preserve_format, dtype=buffer_dtype
                    )
                    # Exponential moving average of squared gradient values
                    state["exp_avg_sq"] = torch.zeros_like(
                        p, memory_format=torch.preserve_format, dtype=buffer_dtype
                    )
                    if group["amsgrad"]:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state["max_exp_avg_sq"] = torch.zeros_like(
                            p, memory_format=torch.preserve_format, dtype=buffer_dtype
                        )
                exp_avgs.append(state["exp_avg"])
                exp_avg_sqs.append(state["exp_avg_sq"])
                if group["amsgrad"]:
                    max_exp_avg_sqs.append(state["max_exp_avg_sq"])
                state_steps.append(state["step"])
                param2 = get_param2(p, self.params_attr)
                params2.append(param2)
        # Delegate the actual parameter update to the functional API.
        adamw(
            params_with_grad,
            params2,
            grads,
            exp_avgs,
            exp_avg_sqs,
            max_exp_avg_sqs,
            state_steps,
            amsgrad=group["amsgrad"],
            beta1=beta1,
            beta2=beta2,
            lr=group["lr"],
            weight_decay=group["weight_decay"],
            eps=group["eps"],
            maximize=group["maximize"],
            foreach=group["foreach"],
        )
    return loss
| 36,453 | 27.106399 | 111 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/jit/_trace.py | import torch
from functools import wraps
# For CPU, wrap torch.jit.trace to disable autocast cache when using torch.jit.trace
# within the scope of torch.cpu.amp.autocast.
# See https://github.com/pytorch/pytorch/pull/63552 for more information.
# For XPU, wrap torch.jit.trace to disable the check trace to avoid the double floating
# computing for the xpu platform which unsupports 2d block
def need_to_disable_check_trace_for_XPU(*args, **kwargs):
    """Decide whether ``check_trace`` should be force-disabled for a
    ``torch.jit.trace`` call.

    Returns True only when every tensor found in the trace inputs lives on
    XPU, the caller did not pass ``check_trace`` explicitly, and the XPU
    platform lacks 2D-block support (where the check re-run would double the
    tracing cost).
    """
    device_type_list = []

    def collect_tensor_devices(obj):
        # Record the device of every tensor reachable through nested
        # tuples/lists/dicts; anything else carries no device information.
        # (The previous version recursed into arbitrary iterables, which
        # blew the stack on e.g. nested strings.)
        if isinstance(obj, torch.Tensor):
            device_type_list.append(obj.device.type)
        elif isinstance(obj, (tuple, list)):
            for elm in obj:
                collect_tensor_devices(elm)
        elif isinstance(obj, dict):
            for elm in obj.values():
                collect_tensor_devices(elm)

    for arg in args:
        collect_tensor_devices(arg)
    if "example_inputs" in kwargs:
        collect_tensor_devices(kwargs["example_inputs"])
    # With no tensors found we cannot conclude the workload targets XPU, so
    # keep the default check_trace behavior.
    is_xpu = bool(device_type_list) and all(
        t == "xpu" for t in device_type_list
    )
    return (
        is_xpu
        and "check_trace" not in kwargs
        and not torch.xpu.has_2d_block_array()
    )
def jit_trace_wrapper(f):
    """Wrap ``torch.jit.trace`` (see the assignment below) so that:

    * the autocast cache is disabled while tracing under CPU autocast
      (https://github.com/pytorch/pytorch/pull/63552), and
    * ``check_trace`` is disabled for all-XPU inputs on platforms without
      2D-block support, avoiding a duplicated trace run.
    """

    @wraps(f)
    def wrapper(*args, **kwargs):
        prev = torch.is_autocast_cache_enabled()
        # For running CPU workload, disable autocast cache
        if torch.is_autocast_cpu_enabled():
            torch.set_autocast_cache_enabled(False)
        try:
            # For running XPU workload and the platform unsupports 2d block,
            # the check_trace is here disabled in jit trace to avoid double computing
            if torch.xpu.is_available() and need_to_disable_check_trace_for_XPU(
                *args, **kwargs
            ):
                kwargs["check_trace"] = False
            traced = f(*args, **kwargs)
        finally:
            # Restore the caller's autocast-cache setting even when tracing
            # raises, so a failed trace cannot leak the disabled state.
            torch.set_autocast_cache_enabled(prev)
        return traced

    return wrapper


torch.jit.trace = jit_trace_wrapper(torch.jit.trace)
| 2,504 | 31.532468 | 87 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/cpu/auto_ipex.py | import os
import glob
import logging
import platform
import subprocess
import sys
import uuid
from argparse import ArgumentParser, REMAINDER
from argparse import RawTextHelpFormatter
from tempfile import mkstemp
format_str = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
logging.basicConfig(level=logging.INFO, format=format_str)
logger = logging.getLogger(__name__)
def apply_monkey_patch(program, dtype, auto_ipex_verbose, disable_ipex_graph_mode):
    """Produce a temporary copy of *program* with an ipex auto-optimization
    prelude prepended, and return the path of that generated file.

    The prelude monkey-patches ``torch.nn.Module.__call__`` so that any module
    invoked by the script is transparently passed through ``ipex.optimize``
    on first use. The caller is responsible for deleting the generated file.
    """
    # Auto apply the ipex features
    # Open the original file and get the content
    with open(program) as f:
        original_program_lines = f.readlines()
    # Modify the content with import ipex
    # NOTE: the template below is written verbatim into the generated script;
    # the {0}/{1}/{2} placeholders are filled by .format() at the end
    # ({0}=bf16 enabled, {1}=verbose, {2}=graph mode disabled).
    monkey_patch = """import torch
import intel_extension_for_pytorch as ipex
from typing import Any, Callable
import functools
import threading
def set_optimized_attr(model):
    setattr(model, "_ipex_optimized", True)
    for child_name, child in model.named_children():
        set_optimized_attr(child)
_orig_module_call: Callable = torch.nn.Module.__call__
_auto_ipex_thread_local_storage = threading.local()
setattr(_auto_ipex_thread_local_storage, "nested_level", 0)
class nested_optimized(object):
    def __enter__(self):
        global _auto_ipex_thread_local_storage
        _auto_ipex_thread_local_storage.nested_level = getattr(_auto_ipex_thread_local_storage, 'nested_level', 0) + 1
    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any):
        global _auto_ipex_thread_local_storage
        _auto_ipex_thread_local_storage.nested_level -= 1
        return False
# _ipex_optimize_hit_count record how many times hit the optimized path.
# Only used when _auto_ipex_verbose is True.
_ipex_optimize_hit_count = 0
@functools.wraps(_orig_module_call)
def module_call_wrapper(mod, *args, **kwargs):
    def forward(mod, *args, **kwargs):
        # We skip the optimize path in below 2 cases to avoid performance overhead.
        # * Case 1: hasattr(mod, "_ipex_optimized"). For the module and submodules after
        # optimized, this attr will be added to avoid duplicated invocation.
        # * Case 2: nested_level != 0. Some modules in huggingface is created during
        # the forward function instead of the __init__ function. We are unable to add
        # the _ipex_optimized attr for this kind of module, so we will use the nested_level
        # to avoid unnecessary invocation for this kind of module.
        if not hasattr(mod, "_ipex_optimized") and (getattr(_auto_ipex_thread_local_storage, 'nested_level', 0)==0):
            set_optimized_attr(mod)
            dataType = torch.bfloat16 if ({0} == True) else torch.float32
            optimized_m = ipex.optimize(mod.eval(), dtype=dataType, graph_mode=(None if ({2} == True) else True)).eval()
            set_optimized_attr(optimized_m)
            def optimized_m_forward(*args, **kwargs):
                with torch.cpu.amp.autocast(enabled={0}), torch.no_grad(), nested_optimized():
                    return optimized_m(*args, **kwargs)
            if not {2}:
                # Warm up run to finish some warm up steps for graph mode in ipex.optimize
                for _ in range(3):
                    optimized_m_forward(*args, **kwargs)
            if {1}:
                # This path is valid only when auto_ipex_verbose is True.
                # And this path is only used for debug and UT.
                global _ipex_optimize_hit_count
                _ipex_optimize_hit_count += 1
                print("_ipex_optimize_hit_count is: %d" % _ipex_optimize_hit_count, flush=True)
                # Profile once to check whether ipex.optimize success or not
                with torch.profiler.profile(
                    activities=[torch.profiler.ProfilerActivity.CPU]
                ) as prof:
                    optimized_m_forward(*args, **kwargs)
                print(prof.key_averages().table(sort_by="self_cpu_time_total", row_limit=-1))
            mod.forward = optimized_m_forward
        return _orig_module_call(mod, *args, **kwargs)
    return forward(mod, *args, **kwargs)
setattr(torch.nn.Module, "__call__", module_call_wrapper)\n""".format(
        dtype.lower() == "bfloat16", auto_ipex_verbose, disable_ipex_graph_mode
    )
    original_program_lines.insert(0, monkey_patch)
    program_absolute_path = os.path.abspath(program)
    program_absolute_path_dir = os.path.dirname(program_absolute_path)
    # Unique suffix so concurrent launches of the same script do not collide.
    generate_file_suffix = (
        str(hash(program_absolute_path)) + str(uuid.uuid1()) + "_auto_ipex"
    )
    _, generate_file = mkstemp(
        suffix=generate_file_suffix, dir=program_absolute_path_dir, text=True
    )
    # Write the monkey_patched content to temp file
    with open(generate_file, "w") as f:
        f.writelines(original_program_lines)
    return generate_file
def _exec(args):
    """Generate the monkey-patched program, execute it with the current
    interpreter, and always delete the generated file afterwards."""
    monkey_patch_program = apply_monkey_patch(
        args.program, args.dtype, args.auto_ipex_verbose, args.disable_ipex_graph_mode
    )
    try:
        cmd = [sys.executable, "-u", monkey_patch_program]
        cmd.extend(args.program_args)
        print("cmd_s is:{}".format(" ".join(cmd)))
        # Execute the command as an argument vector (no shell): arguments
        # containing spaces or shell metacharacters are passed through
        # verbatim, unlike the previous os.system() on a space-joined string.
        subprocess.run(cmd)
    finally:
        # Remove the Monkey patched program
        if os.path.exists(monkey_patch_program):
            os.remove(monkey_patch_program)
def add_auto_ipex_params(parser, auto_ipex_default_enabled=False):
    """Register the code-free (auto-ipex) command-line switches on *parser*.

    Args:
        parser: an ``argparse.ArgumentParser`` (or compatible) to extend.
        auto_ipex_default_enabled: default value of the ``--auto-ipex`` flag.
    """
    group = parser.add_argument_group("Code_Free Parameters")
    # (flag aliases, add_argument keyword options) for every auto-ipex switch.
    option_specs = [
        (
            ("--auto-ipex", "--auto_ipex"),
            dict(
                action="store_true",
                default=auto_ipex_default_enabled,
                help="Auto enabled the ipex optimization feature",
            ),
        ),
        (
            ("--dtype",),
            dict(
                metavar="\b",
                default="float32",
                type=str,
                choices=["float32", "bfloat16"],
                help="The data type to run inference. float32 or bfloat16 is allowed.",
            ),
        ),
        (
            ("--auto-ipex-verbose", "--auto_ipex_verbose"),
            dict(
                action="store_true",
                default=False,
                help="This flag is only used for debug and UT of auto ipex.",
            ),
        ),
        (
            ("--disable-ipex-graph-mode", "--disable_ipex_graph_mode"),
            dict(
                action="store_true",
                default=False,
                help="Enable the Graph Mode for ipex.optimize",
            ),
        ),
    ]
    for flags, options in option_specs:
        group.add_argument(*flags, **options)
def parse_args():
    """
    Helper function parsing the command line options
    @retval Namespace of the parsed arguments
    """
    parser = ArgumentParser(
        description="This is a script for auto apply ipex optimization."
        "\n################################# Basic usage ############################# \n"
        "\n 1. Apply ipex optimization with fp32 data type\n"
        "\n   >>> python -m intel_extension_for_pytorch.cpu.auto_ipex python_script args \n"
        "\n 2. Apply ipex optimization with bf16 data type\n"
        "\n   >>> python -m intel_extension_for_pytorch.cpu.auto_ipex --dtype bfloat16 python_script args \n",
        formatter_class=RawTextHelpFormatter,
    )
    add_auto_ipex_params(parser, auto_ipex_default_enabled=True)
    # positional
    parser.add_argument(
        "program",
        type=str,
        # Fixed typo in the user-facing help text ("proram" -> "program").
        help="The full path to the program/script to be launched. "
        "followed by all the arguments for the script",
    )
    # rest from the training program
    parser.add_argument("program_args", nargs=REMAINDER)
    return parser.parse_args()
def main():
    """CLI entry point: validate the platform and LD_PRELOAD, then launch
    the monkey-patched user program via _exec()."""
    env_before = set(os.environ.keys())
    if platform.system() == "Windows":
        raise RuntimeError("Windows platform is not supported!!!")
    args = parse_args()
    # Drop every LD_PRELOAD entry that does not resolve to an existing path.
    if "LD_PRELOAD" in os.environ:
        lst_valid = []
        for item in os.environ["LD_PRELOAD"].split(":"):
            if item == "":
                continue
            if glob.glob(item):
                lst_valid.append(item)
            else:
                logger.warning(
                    "{} doesn't exist. Removing it from LD_PRELOAD.".format(item)
                )
        os.environ["LD_PRELOAD"] = ":".join(lst_valid) if lst_valid else ""
    _exec(args)
    # Log every environment variable that was added while this script ran.
    for x in sorted(set(os.environ.keys()) - env_before):
        logger.debug("{0}={1}".format(x, os.environ[x]))


if __name__ == "__main__":
    main()
| 8,482 | 35.564655 | 120 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/cpu/onednn_fusion.py | import intel_extension_for_pytorch._C as core
def enable_onednn_fusion(enabled):
    r"""
    Enables or disables oneDNN fusion functionality. If enabled, oneDNN
    operators will be fused in runtime, when intel_extension_for_pytorch
    is imported.
    Args:
        enabled (bool): Whether to enable oneDNN fusion functionality or not.
            Default value is ``True``.
    Examples:
        >>> import intel_extension_for_pytorch as ipex
        >>> # to enable the oneDNN fusion
        >>> ipex.enable_onednn_fusion(True)
        >>> # to disable the oneDNN fusion
        >>> ipex.enable_onednn_fusion(False)
    """
    # Pick the native toggle matching the requested state and invoke it.
    toggle = core.enable_jit_opt if enabled else core.disable_jit_opt
    toggle()
| 723 | 25.814815 | 77 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/cpu/graph_capture.py | import copy
import torch
from torch._dynamo.backends.common import fake_tensor_unsupported
from torch.jit._trace import TracerWarning
from enum import IntEnum
from typing import List
import functools
import logging
import threading
import warnings
class RunMethods(IntEnum):
    # Execution strategy chosen (once) by GraphCapture for a wrapped model:
    # JIT         - frozen torch.jit.trace graph is used for inference.
    # TorchDynamo - TorchDynamo with a JIT-trace backend produced the graph.
    # EagerInfer  - both graph paths failed; run the model eagerly.
    # EagerTrain  - training requested; graph capture unsupported, run eagerly.
    JIT = 1
    TorchDynamo = 2
    EagerInfer = 3
    EagerTrain = 4
class GraphCapture(object):
    """Decorator factory that lazily captures an inference graph for a model.

    On the first (inference) call it tries, in order: plain ``torch.jit.trace``
    + freeze, then TorchDynamo with a JIT-trace backend, and finally falls back
    to eager execution. The chosen strategy is cached in ``self.method`` (a
    ``RunMethods`` value) and reused for every later call. Graph generation is
    guarded by a lock so concurrent first calls do not race.
    """

    def __init__(self, model, train, dtype, weights_prepack):
        # Deep-copy so graph capture never mutates the caller's module.
        self.model = copy.deepcopy(model)
        self.train = train
        self.dtype = dtype
        # Stored but not read in this class -- presumably consumed by the
        # caller that constructs GraphCapture; TODO confirm.
        self.weights_prepack = weights_prepack
        # RunMethods value once a strategy has been decided; None until then.
        self.method = None
        self.lock = threading.Lock()

    def __call__(self, func):
        # TorchDynamo backend: try to JIT-trace + freeze the FX graph module,
        # returning the original module unchanged if tracing fails.
        @fake_tensor_unsupported
        def compiler(gm: torch.fx.GraphModule, example_inputs: List[torch.Tensor]):
            try:
                with torch.no_grad():
                    traced_model = torch.jit.trace(gm.eval(), example_inputs)
                    traced_model = torch.jit.freeze(traced_model)
                return traced_model
            except Exception:
                warnings.warn("JIT trace failed during the 'compiler' process.")
                return gm

        @functools.wraps(func)
        def forward(*input, **kwargs):
            # Inside an active trace, run the original callable so the trace
            # records the real ops rather than our dispatch logic.
            if torch.jit.is_tracing():
                return func(*input, **kwargs)
            with torch.cpu.amp.autocast(
                enabled=(self.dtype == torch.bfloat16 or self.dtype == torch.half),
                dtype=self.dtype,
            ):
                if self.method:
                    if self.train:
                        return func(*input, **kwargs)
                    else:
                        return self.model(*input, **kwargs)
                else:
                    # Lock the graph generation process to avoid multiple threads generating graph simultaneously.
                    with self.lock:
                        # Re-check under the lock: another thread may have
                        # finished graph generation while we waited.
                        if self.method:
                            if self.train:
                                return func(*input, **kwargs)
                            else:
                                return self.model(*input, **kwargs)
                        if self.train:
                            warnings.warn(
                                "graph capture does not support training yet."
                            )
                            self.method = RunMethods.EagerTrain
                            return func(*input, **kwargs)
                        else:
                            try:
                                # Try JIT trace.
                                # Tracing only records operations done when the given function is run on the given
                                # tensors. Therefore, the returned ScriptModule will always run the same traced graph
                                # on any input. This has some important implications when your module is expected
                                # to run different sets of operations, depending on the input and/or the module state.
                                # In cases like these, tracing would not be appropriate, and the tracer will try to
                                # emit warnings when doing something that may cause an incorrect trace to be produced.
                                # Therefore, we catch these warnings and treat them as errors, and let TorchDynamo
                                # handle such models appropriately.
                                with warnings.catch_warnings():
                                    warnings.filterwarnings(
                                        "error", category=TracerWarning
                                    )
                                    traced_model = torch.jit.trace(
                                        self.model.eval(), input
                                    ).eval()
                                    traced_model = torch.jit.freeze(traced_model)
                                    output = traced_model(*input, **kwargs)
                                    self.model = traced_model
                                    self.method = RunMethods.JIT
                                    logging.debug("generate graph by JIT trace.")
                                    return output
                            except BaseException:
                                try:
                                    # JIT trace failed, try torchdynamo with JIT trace backend.
                                    torch._dynamo.reset()
                                    dynamo_model = torch._dynamo.optimize(
                                        compiler, dynamic=True
                                    )(self.model)
                                    output = dynamo_model(*input, **kwargs)
                                    self.model = dynamo_model
                                    self.method = RunMethods.TorchDynamo
                                    logging.debug("generate graph by TorchDynamo.")
                                    return output
                                except BaseException:
                                    warnings.warn(
                                        "Both JIT and TorchDynamo failed, fallback to original model."
                                    )
                                    self.method = RunMethods.EagerInfer
                                    torch._dynamo.reset()
                                    return self.model(*input, **kwargs)

        return forward
| 5,510 | 46.921739 | 118 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/cpu/nn/linear_fuse_eltwise.py | import torch
import intel_extension_for_pytorch as ipex # noqa F401
from intel_extension_for_pytorch.nn.utils._weight_prepack import _IPEXLinear as _IPEXLinear
import enum
class EltwiseType(enum.IntEnum):
    # Integer codes passed to the torch_ipex fused linear+eltwise kernel to
    # select the post-op applied after the linear computation.
    NotFused = 0
    ReLU = 1
    Sigmoid = 2
class IPEXLinearEltwise(torch.nn.Module):
    """Fuses an ``_IPEXLinear`` module with a trailing relu/sigmoid activation
    into a single ipex kernel invocation."""

    def __init__(self, ipex_linear_module, eltwise="relu"):
        super(IPEXLinearEltwise, self).__init__()
        assert isinstance(ipex_linear_module, _IPEXLinear)
        self.m = ipex_linear_module
        self.out_features = ipex_linear_module.out_features
        if eltwise != "relu":
            # Only "relu" and "sigmoid" activations can be fused.
            assert eltwise == "sigmoid"
            self.eltwise = EltwiseType.Sigmoid
        else:
            self.eltwise = EltwiseType.ReLU

    def forward(self, x):
        linear = self.m
        return torch.ops.torch_ipex.ipex_linear_eltwise(
            x,
            linear.weight,
            linear.bias,
            self.eltwise,
            linear.ctx.get_data_handle(),
            self.out_features,
        )
| 998 | 28.382353 | 91 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/cpu/nn/_roi_align.py | from torch import nn, Tensor
from torch.jit.annotations import BroadcastingList2
from ...nn import functional as F
class RoIAlign(nn.Module):
    """
    See :func:`roi_align`.
    """

    def __init__(
        self,
        output_size: BroadcastingList2[int],
        spatial_scale: float,
        sampling_ratio: int,
        aligned: bool = False,
    ):
        super(RoIAlign, self).__init__()
        # Only stores the pooling configuration; the work happens in forward().
        self.output_size = output_size
        self.spatial_scale = spatial_scale
        self.sampling_ratio = sampling_ratio
        self.aligned = aligned

    def forward(self, input: Tensor, rois: Tensor) -> Tensor:
        # Delegate to the functional roi_align helper with the stored config.
        return F._roi_align_helper.roi_align(
            input,
            rois,
            self.output_size,
            self.spatial_scale,
            self.sampling_ratio,
            self.aligned,
        )

    def __repr__(self) -> str:
        return (
            f"{self.__class__.__name__}("
            f"output_size={self.output_size}"
            f", spatial_scale={self.spatial_scale}"
            f", sampling_ratio={self.sampling_ratio}"
            f", aligned={self.aligned}"
            ")"
        )
| 1,198 | 26.883721 | 64 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/cpu/nn/_embeddingbag.py | import torch
import warnings
import intel_extension_for_pytorch._C as core
from typing import Optional, Tuple
Tensor = torch.Tensor
def _embedding_bag_fast_path_sum(
weights: Tensor,
indices: Tensor,
offsets: Tensor,
mode: int = 0,
scale_grad_by_freq: bool = False,
per_sample_weights: Optional[Tensor] = None,
padding_idx: Optional[int] = None,
) -> bool:
if indices.dtype != torch.int64 or offsets.dtype != torch.int64:
return False
if mode != 0 or scale_grad_by_freq:
return False
if weights.stride(1) != 1 or weights.dtype not in (torch.float, torch.bfloat16):
return False
if per_sample_weights is not None or padding_idx is not None:
return False
return True
# Keep a handle on the stock implementation before it gets monkey-patched.
torch_embedding_bag = torch.embedding_bag


def patch_emb_bag_cpu_only(func):
    """Wrap *func* so it only serves the all-CPU case; any other device
    combination is forwarded to the original ``torch.embedding_bag``."""

    def wrapper(
        weights: Tensor,
        indices: Tensor,
        offsets: Tensor,
        scale_grad_by_freq: bool = False,
        mode: int = 0,
        sparse: bool = False,
        per_sample_weights: Optional[Tensor] = None,
        include_last_offset: bool = False,
        padding_idx: Optional[int] = None,
    ):
        device_checked = [weights, indices, offsets]
        if per_sample_weights is not None:
            device_checked.append(per_sample_weights)
        # Dispatch to the patched implementation only when every tensor
        # involved lives on CPU; otherwise fall back to the stock op.
        on_cpu = all(t.device.type == "cpu" for t in device_checked)
        target = func if on_cpu else torch_embedding_bag
        return target(
            weights,
            indices,
            offsets,
            scale_grad_by_freq,
            mode,
            sparse,
            per_sample_weights,
            include_last_offset,
            padding_idx,
        )

    return wrapper
@patch_emb_bag_cpu_only
def _embeddingbag(
    weights: Tensor,
    indices: Tensor,
    offsets: Tensor,
    scale_grad_by_freq: bool = False,
    mode: int = 0,
    sparse: bool = False,
    per_sample_weights: Optional[Tensor] = None,
    include_last_offset: bool = False,
    padding_idx: Optional[int] = None,
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
    """CPU replacement for ``torch.embedding_bag`` that routes eligible
    sum-mode calls to the ipex fused kernel and falls back to the stock op
    for everything else."""
    if not _embedding_bag_fast_path_sum(
        weights,
        indices,
        offsets,
        mode,
        scale_grad_by_freq,
        per_sample_weights,
        padding_idx,
    ):
        warnings.warn("Fallback to torch.embedding bag")
        return torch_embedding_bag(
            weights,
            indices,
            offsets,
            scale_grad_by_freq,
            mode,
            sparse,
            per_sample_weights,
            include_last_offset,
            padding_idx,
        )
    pooled = torch.ops.torch_ipex.embedding_bag(
        weights, indices, offsets, sparse, include_last_offset
    )
    # torch.embedding_bag returns four tensors; the fast path only produces
    # the pooled output, so pad the remaining slots with empty tensors.
    return (pooled, torch.empty(0), torch.empty(0), torch.empty(0))


if core._has_cpu():
    torch.embedding_bag = _embeddingbag
| 3,460 | 26.251969 | 100 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/cpu/nn/interaction.py | import torch
from torch.autograd import Function
def interaction(*args):
    r"""
    Get the interaction feature beyond different kinds of features (like gender
    or hobbies), used in DLRM model.
    For now, we only optimized "dot" interaction at `DLRM Github repo
    <https://github.com/facebookresearch/dlrm/blob/main/dlrm_s_pytorch.py#L475-L495>`_.
    Through this, we use the dot product to represent the interaction feature
    between two features.
    For example, if feature 1 is "Man" which is represented by [0.1, 0.2, 0.3],
    and feature 2 is "Like play football" which is represented by [-0.1, 0.3, 0.2].
    The dot interaction feature is
    ([0.1, 0.2, 0.3] * [-0.1, 0.3, 0.2]^T) = -0.1 + 0.6 + 0.6 = 1.1
    Args:
        *args: Multiple tensors which represent different features
    Shape
        - Input: :math:`N * (B, D)`, where N is the number of different kinds of features,
          B is the batch size, D is feature size
        - Output: :math:`(B, D + N * ( N - 1 ) / 2)`
    """
    # Outside of autograd the fused forward kernel can be called directly,
    # skipping the autograd.Function bookkeeping.
    if not torch.is_grad_enabled():
        return torch.ops.torch_ipex.interaction_forward(args)
    return InteractionFunc.apply(*args)
class InteractionFunc(Function):
    """Autograd wrapper around the ipex fused interaction kernels."""

    @staticmethod
    def forward(ctx, *features):
        # Keep the inputs around for the backward pass.
        ctx.save_for_backward(*features)
        return torch.ops.torch_ipex.interaction_forward(features)

    @staticmethod
    def backward(ctx, grad_out):
        saved_inputs = ctx.saved_tensors
        grad_inputs = torch.ops.torch_ipex.interaction_backward(
            grad_out.contiguous(), saved_inputs
        )
        # One gradient per forwarded feature tensor.
        return tuple(grad_inputs)
| 1,570 | 32.425532 | 90 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/cpu/nn/frozen_batch_norm.py | import torch
from torch import nn
class FrozenBatchNorm2d(nn.Module):
    r"""
    BatchNorm2d where the batch statistics and the affine parameters are fixed
    Args:
        num_features (int): :math:`C` from an expected input of size :math:`(N, C, H, W)`
    Shape
        - Input: :math:`(N, C, H, W)`
        - Output: :math:`(N, C, H, W)` (same shape as input)
    """

    def __init__(
        self,
        num_features: int,
        eps: float = 1e-5,
    ):
        super(FrozenBatchNorm2d, self).__init__()
        self.eps = eps
        # All four statistics are frozen buffers (never trained): scale and
        # variance start at one, shift and mean at zero.
        for buf_name, init_fn in (
            ("weight", torch.ones),
            ("bias", torch.zeros),
            ("running_mean", torch.zeros),
            ("running_var", torch.ones),
        ):
            self.register_buffer(buf_name, init_fn(num_features))

    def forward(self, input):
        return torch.ops.torch_ipex.frozen_batch_norm(
            input, self.weight, self.bias, self.running_mean, self.running_var, self.eps
        )
| 1,002 | 29.393939 | 89 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/cpu/nn/_roi_align_helper.py | # This Python file uses the following encoding: utf-8
from typing import List, Union
import torch
from torch import Tensor
from torch.nn.modules.utils import _pair
from torch.jit.annotations import BroadcastingList2
def _cat(tensors: List[Tensor], dim: int = 0) -> Tensor:
"""
Efficient version of torch.cat that avoids a copy if there is only a single element in a list
"""
# TODO add back the assert
# assert isinstance(tensors, (list, tuple))
if len(tensors) == 1:
return tensors[0]
return torch.cat(tensors, dim)
def _convert_boxes_to_roi_format(boxes: List[Tensor]) -> Tensor:
    """Convert a per-image list of box tensors ([L, 4] each) into the single
    [K, 5] roi layout, where column 0 holds the batch index of each box.

    Args:
        boxes: one tensor of shape [L, 4] per image, in (x1, y1, x2, y2) form.

    Returns:
        Tensor[K, 5]: rows of (batch_index, x1, y1, x2, y2).
    """
    concat_boxes = _cat(list(boxes), dim=0)
    # One id column per image, repeating the image index once per box.
    ids = _cat(
        [torch.full_like(b[:, :1], i) for i, b in enumerate(boxes)],
        dim=0,
    )
    return torch.cat([ids, concat_boxes], dim=1)
def _check_roi_boxes_shape(boxes: Union[Tensor, List[Tensor]]):
if isinstance(boxes, (list, tuple)):
for _tensor in boxes:
assert (
_tensor.size(1) == 4
), "The shape of the tensor in the boxes list is not correct as List[Tensor[L, 4]]"
elif isinstance(boxes, torch.Tensor):
assert (
boxes.size(1) == 5
), "The boxes tensor shape is not correct as Tensor[K, 5]"
else:
raise ValueError(
"boxes is expected to be a Tensor[L, 5] or a List[Tensor[K, 4]]"
)
return
def roi_align(
    input: Tensor,
    boxes: Union[Tensor, List[Tensor]],
    output_size: BroadcastingList2[int],
    spatial_scale: float = 1.0,
    sampling_ratio: int = -1,
    aligned: bool = False,
) -> Tensor:
    """
    Performs Region of Interest (RoI) Align operator with average pooling, as described in Mask R-CNN.
    It is optimized with parallelization and channels last support on the basis of the torchvision's roi_align.
    The semantics of Intel® Extension for PyTorch* roi_align is exactly the same as that of torchvision.
    We override roi_align in the torchvision with Intel® Extension for PyTorch* roi_align via ATen op registration.
    It is activated when Intel® Extension for PyTorch* is imported from the Python frontend or when it is linked
    by a C++ program. It is fully transparent to users.
    In certain cases, if you are using a self-implemented roi_align class or function that behave exactly the same as
    the ones in torchvision, please import the optimized one in Intel® Extension for PyTorch* as the following
    examples to get performance boost on Intel platforms.
    .. highlight:: python
    .. code-block:: python
        from intel_extension_for_pytorch import roi_align
    or
    .. highlight:: python
    .. code-block:: python
        from intel_extension_for_pytorch import RoIAlign
    Args:
        input (Tensor[N, C, H, W]): The input tensor, i.e. a batch with ``N`` elements. Each element
            contains ``C`` feature maps of dimensions ``H x W``.
            If the tensor is quantized, we expect a batch size of ``N == 1``.
        boxes (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2)
            format where the regions will be taken from.
            The coordinate must satisfy ``0 <= x1 < x2`` and ``0 <= y1 < y2``.
            If a single Tensor is passed, then the first column should
            contain the index of the corresponding element in the batch, i.e. a number in ``[0, N - 1]``.
            If a list of Tensors is passed, then each Tensor will correspond to the boxes for an element i
            in the batch.
        output_size (int or Tuple[int, int]): the size of the output (in bins or pixels) after the pooling
            is performed, as (height, width).
        spatial_scale (float): a scaling factor that maps the input coordinates to
            the box coordinates. Default: 1.0
        sampling_ratio (int): number of sampling points in the interpolation grid
            used to compute the output value of each pooled output bin. If > 0,
            then exactly ``sampling_ratio x sampling_ratio`` sampling points per bin are used. If
            <= 0, then an adaptive number of grid points are used (computed as
            ``ceil(roi_width / output_width)``, and likewise for height). Default: -1
        aligned (bool): If False, use the legacy implementation.
            If True, pixel shift the box coordinates it by -0.5 for a better alignment with the two
            neighboring pixel indices. This version is used in Detectron2
    Returns:
        Tensor[K, C, output_size[0], output_size[1]]: The pooled RoIs.
    """
    # Validate shapes early; raises on malformed boxes.
    _check_roi_boxes_shape(boxes)
    rois = boxes
    # _pair broadcasts a scalar output_size to (height, width).
    output_size = _pair(output_size)
    # A per-image list of boxes is converted into the single [K, 5] roi layout.
    if not isinstance(rois, torch.Tensor):
        rois = _convert_boxes_to_roi_format(rois)
    return torch.ops.torch_ipex.ROIAlign_forward(
        input,
        rois,
        spatial_scale,
        output_size[0],
        output_size[1],
        sampling_ratio,
        aligned,
    )
| 5,016 | 39.788618 | 117 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/cpu/launch/launcher_multi_instances.py | import sys
import subprocess
import os
import intel_extension_for_pytorch.cpu.auto_ipex as auto_ipex
from .launcher_base import Launcher
class MultiInstancesLauncher(Launcher):
    """
    Launcher for single instance and multi-instance

    Splits the selected CPU cores into per-instance pools, pins each instance
    to its pool (via numactl/taskset, or via OMP affinity environment
    variables when no task manager is used) and spawns one subprocess per
    instance.
    """

    def __init__(self, logger=None, lscpu_txt=""):
        super(MultiInstancesLauncher, self).__init__(logger, lscpu_txt)
        # Supported task managers; index 1 ("none") means no external pinning
        # tool is used and affinity is expressed through OMP environment variables.
        self.tm_supported = ["auto", "none", "numactl", "taskset"]

    def add_params(self, parser):
        """Register the multi-instance command line arguments on ``parser``."""
        group = parser.add_argument_group("Multi-instance Arguments")
        # multi-instance control
        group.add_argument(
            "--ninstances",
            default=0,
            type=int,
            help="Number of instances",
        )
        group.add_argument(
            "--instance-idx",
            "--instance_idx",
            default="",
            type=str,
            help="Inside the multi instance list, execute a specific instance at indices. \
            If it is set to -1 or empty, run all of them.",
        )
        group.add_argument(
            "--use-logical-cores",
            "--use_logical_cores",
            action="store_true",
            default=False,
            help="Use logical cores on the workloads or not. By default, only physical cores are used.",
        )
        group.add_argument(
            "--skip-cross-node-cores",
            "--skip_cross_node_cores",
            action="store_true",
            default=False,
            # Bug fix: this flag *prevents* cross-node core assignment (it is
            # forwarded as skip_cross_node_cores below); the old help text
            # stated the opposite.
            help="Prevent instances from being executed on cores across NUMA nodes.",
        )
        group.add_argument(
            "--multi-task-manager",
            "--multi_task_manager",
            default="auto",
            type=str,
            choices=self.tm_supported,
            help=f"Choose which multi task manager to run the workloads with. Supported choices are \
            {self.tm_supported}.",
        )
        group.add_argument(
            "--latency-mode",
            "--latency_mode",
            action="store_true",
            default=False,
            help="Use 4 cores per instance over all physical cores.",
        )
        group.add_argument(
            "--throughput-mode",
            "--throughput_mode",
            action="store_true",
            default=False,
            help="Run one instance per node with all physical cores.",
        )
        group.add_argument(
            "--cores-list",
            "--cores_list",
            default="",
            type=str,
            help='Specify cores list for multiple instances to run on, in format of list of single core ids \
            "core_id,core_id,..." or list of core ranges "core_id-core_id,...". \
            By default all cores will be used.',
        )
        group.add_argument(
            "--benchmark",
            action="store_true",
            default=False,
            help="Enable benchmark config. JeMalloc's MALLOC_CONF has been tuned for low latency. \
            Recommend to use this for benchmarking purpose; for other use cases, \
            this MALLOC_CONF may cause Out-of-Memory crash.",
        )

    def is_command_available(self, cmd):
        """Return True if ``cmd`` can be resolved by ``which`` in the current environment."""
        is_available = False
        try:
            cmd_s = ["which", cmd]
            r = subprocess.run(
                cmd_s,
                env=os.environ,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
            )
            if r.returncode == 0:
                is_available = True
        except FileNotFoundError:
            # "which" itself is missing; treat the command as unavailable.
            pass
        return is_available

    def set_multi_task_manager(self, multi_task_manager="auto", skip_list=None):
        """
        Set multi-task manager

        Resolves ``multi_task_manager`` ("auto" picks the first available tool)
        to one of ``self.tm_supported``, skipping entries listed in ``skip_list``.
        """
        if skip_list is None:
            skip_list = []
        tm_bin_name = {
            "numactl": ["numactl", ""],
            "taskset": ["taskset", ""],
        }
        tm_local = self.set_lib_bin_from_list(
            multi_task_manager,
            tm_bin_name,
            "multi-task manager",
            self.tm_supported,
            self.is_command_available,
            skip_list,
        )
        return tm_local

    def execution_command_builder(
        self, args, omp_runtime, task_mgr, environ, cpu_pools, index
    ):
        """Build and spawn the shell command for instance ``index``.

        Pins the instance to its core pool either through the external task
        manager (numactl/taskset) or, when ``task_mgr`` is "none", through OMP
        affinity environment variables. Returns a dict with the started
        ``subprocess.Popen`` object and the command string.
        """
        # Bug fix: the upper bound must be exclusive; index == len(cpu_pools)
        # passed the old check and then failed on cpu_pools[index].
        assert index > -1 and index < len(
            cpu_pools
        ), "Designated instance index for constructing execution commands is out of range."
        cmd = []
        # Bug fix: copy the environment so per-instance affinity keys do not
        # leak into the shared dict passed in by launch().
        environ_local = dict(environ)
        pool = cpu_pools[index]
        pool_txt = pool.get_pool_txt()
        cores_list_local = pool_txt["cores"]
        nodes_list_local = pool_txt["nodes"]
        if task_mgr != self.tm_supported[1]:
            # Pin with an external tool: numactl also binds memory allocation to
            # the instance's NUMA nodes; taskset only sets the CPU affinity mask.
            params = ""
            if task_mgr == "numactl":
                params = f"-C {cores_list_local} "
                params += f"-m {nodes_list_local}"
            elif task_mgr == "taskset":
                params = f"-c {cores_list_local}"
            cmd.append(task_mgr)
            cmd.extend(params.split())
        else:
            # No external tool: express the affinity through the OpenMP runtime.
            k = ""
            v = ""
            if omp_runtime == "default":
                k = "GOMP_CPU_AFFINITY"
                v = cores_list_local
            elif omp_runtime == "intel":
                k = "KMP_AFFINITY"
                v = f"granularity=fine,proclist=[{cores_list_local}],explicit"
            if k != "":
                self.verbose("info", "==========")
                self.verbose("info", f"env: {k}={v}")
                environ_local[k] = v
        if not args.no_python:
            cmd.append(sys.executable)
            cmd.append("-u")
            if args.module:
                cmd.append("-m")
        cmd.append(args.program)
        log_name = f'{args.log_file_prefix}_instance_{index}_cores_{cores_list_local.replace(",", "_")}.log'
        log_name = os.path.join(args.log_dir, log_name)
        cmd.extend(args.program_args)
        cmd_s = " ".join(cmd)
        if args.log_dir:
            # Mirror the instance's combined stdout/stderr into its own log file.
            cmd_s = f"{cmd_s} 2>&1 | tee {log_name}"
        self.verbose("info", f"cmd: {cmd_s}")
        if len(set([c.node for c in pool])) > 1:
            self.verbose(
                "warning",
                f"Cross NUMA nodes execution detected: cores [{cores_list_local}] are on different NUMA nodes [{nodes_list_local}]",
            )
        process = subprocess.Popen(cmd_s, env=environ_local, shell=True)
        return {"process": process, "cmd": cmd_s}

    def launch(self, args):
        """Partition cores, configure the environment and run all requested instances.

        Blocks until every instance finishes; raises CalledProcessError if any
        instance exits with a non-zero status.
        """
        if args.latency_mode and args.throughput_mode:
            raise RuntimeError(
                "Argument latency_mode and throughput_mode cannot be set at the same time."
            )
        if args.latency_mode:
            # Latency mode: fixed 4 physical cores per instance.
            if (
                args.ninstances > 0
                or args.ncores_per_instance > 0
                or len(args.nodes_list) > 0
                or args.use_logical_cores
            ):
                self.verbose(
                    "warning",
                    "--latency-mode is exclusive to --ninstances, --ncores-per-instance, --nodes-list and \
                --use-logical-cores. They won't take effect even if they are set explicitly.",
                )
            args.ncores_per_instance = 4
            args.ninstances = 0
            args.use_logical_cores = False
        if args.throughput_mode:
            # Throughput mode: one instance per NUMA node using all physical cores.
            if (
                args.ninstances > 0
                or args.ncores_per_instance > 0
                or len(args.nodes_list) > 0
                or args.use_logical_cores
            ):
                self.verbose(
                    "warning",
                    "--throughput-mode is exclusive to --ninstances, --ncores-per-instance, --nodes-list and \
                --use-logical-cores. They won't take effect even if they are set explicitly.",
                )
            args.ninstances = len(set([c.node for c in self.cpuinfo.pool_all]))
            args.ncores_per_instance = 0
            args.use_logical_cores = False
        cores_list = self.parse_list_argument(args.cores_list)
        nodes_list = self.parse_list_argument(args.nodes_list)
        # Split the selected cores into per-instance pools.
        self.cpuinfo.gen_pools_ondemand(
            ninstances=args.ninstances,
            ncores_per_instance=args.ncores_per_instance,
            use_logical_cores=args.use_logical_cores,
            use_e_cores=args.use_e_cores,
            skip_cross_node_cores=args.skip_cross_node_cores,
            nodes_list=nodes_list,
            cores_list=cores_list,
        )
        args.ninstances = len(self.cpuinfo.pools_ondemand)
        args.ncores_per_instance = len(self.cpuinfo.pools_ondemand[0])
        is_iomp_set = False
        for item in self.ld_preload:
            if item.endswith("libiomp5.so"):
                is_iomp_set = True
                break
        is_kmp_affinity_set = True if "KMP_AFFINITY" in os.environ else False
        set_kmp_affinity = True
        # When using all cores on all nodes, including logical cores, setting KMP_AFFINITY disables logical cores. \
        # Thus, KMP_AFFINITY should not be set.
        if args.use_logical_cores and len(
            set([c for p in self.cpuinfo.pools_ondemand for c in p])
        ) == len(self.cpuinfo.pool_all):
            assert (
                not is_kmp_affinity_set
            ), 'Environment variable "KMP_AFFINITY" is detected. Please unset it when using all cores.'
            set_kmp_affinity = False
        self.set_memory_allocator(args.memory_allocator, args.benchmark)
        omp_runtime = self.set_omp_runtime(args.omp_runtime, set_kmp_affinity)
        self.add_env("OMP_NUM_THREADS", str(args.ncores_per_instance))
        skip_list = []
        if is_iomp_set and is_kmp_affinity_set:
            # A user-preset KMP_AFFINITY with preloaded libiomp5 would conflict
            # with numactl binding; rule numactl out of auto selection.
            skip_list.append("numactl")
        task_mgr = self.set_multi_task_manager(
            args.multi_task_manager, skip_list=skip_list
        )
        # Set environment variables for multi-instance execution
        self.verbose(
            "info", "env: Untouched preset environment variables are not displayed."
        )
        environ_local = {}
        for k, v in os.environ.items():
            if k == "LD_PRELOAD":
                continue
            environ_local[k] = v
        if len(self.ld_preload) > 0:
            environ_local["LD_PRELOAD"] = ":".join(self.ld_preload)
            self.verbose("info", f'env: LD_PRELOAD={environ_local["LD_PRELOAD"]}')
        for k, v in self.environ_set.items():
            if task_mgr == self.tm_supported[1]:
                # Affinity is assigned per instance later; skip the global values.
                if omp_runtime == "default" and k == "GOMP_CPU_AFFINITY":
                    continue
                if omp_runtime == "intel" and k == "KMP_AFFINITY":
                    continue
            self.verbose("info", f"env: {k}={v}")
            environ_local[k] = v
        if args.auto_ipex:
            args.program = auto_ipex.apply_monkey_patch(
                args.program,
                args.dtype,
                args.auto_ipex_verbose,
                args.disable_ipex_graph_mode,
            )
        instances_available = list(range(args.ninstances))
        instance_idx = self.parse_list_argument(args.instance_idx)
        if -1 in instance_idx:
            instance_idx.clear()
        if len(instance_idx) == 0:
            instance_idx.extend(instances_available)
        # Bug fix: deduplicate first, then sort; the old code sorted before
        # list(set(...)), which discarded the ordering again.
        instance_idx = sorted(set(instance_idx))
        # Bug fix: the assert message talked about "nodes" although this
        # validates the designated instance indices.
        assert set(instance_idx).issubset(
            set(instances_available)
        ), "Designated instance index list contains invalid indices."
        processes = []
        for i in instance_idx:
            process = self.execution_command_builder(
                args=args,
                omp_runtime=omp_runtime,
                task_mgr=task_mgr,
                environ=environ_local,
                cpu_pools=self.cpuinfo.pools_ondemand,
                index=i,
            )
            processes.append(process)
        try:
            for process in processes:
                p = process["process"]
                p.wait()
                if p.returncode != 0:
                    raise subprocess.CalledProcessError(
                        returncode=p.returncode, cmd=process["cmd"]
                    )
        finally:
            if args.auto_ipex:
                # Clean the temp file
                if os.path.exists(args.program) and args.program.endswith("_auto_ipex"):
                    os.remove(args.program)
# Module is import-only; the CLI entry point lives in launch.py.
if __name__ == "__main__":
    pass
| 12,435 | 36.914634 | 132 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/cpu/launch/launch.py | import platform
import os
import glob
import argparse
from argparse import SUPPRESS, OPTIONAL, ZERO_OR_MORE
import logging
from datetime import datetime
import intel_extension_for_pytorch.cpu.auto_ipex as auto_ipex
from .launcher_distributed import DistributedTrainingLauncher
from .launcher_multi_instances import MultiInstancesLauncher
"""
This is a script for launching PyTorch training and inference on Intel Xeon CPU with optimal configurations.
Now, single instance inference/training, multi-instance inference/training and distributed training
with oneCCL backend is enabled.
To get the peak performance on Intel Xeon CPU, the script optimizes the configuration of thread and memory
management. For thread management, the script configures thread affinity and the preload of Intel OMP library.
For memory management, it configures NUMA binding and preload optimized memory allocation library (e.g. tcmalloc, jemalloc).
**How to use this module:**
*** Single instance inference/training ***
1. Run single-instance inference or training on a single node with all CPU nodes.
::
>>> ipexrun --throughput-mode script.py args
2. Run single-instance inference or training on a single CPU node.
::
>>> ipexrun --nodes-list 1 script.py args
*** Multi-instance inference ***
1. Multi-instance
By default, one instance per node. if you want to set the instance numbers and core per instance,
--ninstances and --ncores-per-instance should be set.
>>> ipexrun python_script args
eg: on CLX8280 with 14 instance, 4 cores per instance
::
>>> ipexrun --ninstances 14 --ncores-per-instance 4 python_script args
2. Run single-instance inference among multiple instances.
By default, runs all ninstances. If you want to independently run a single instance among ninstances, specify instance_idx.
eg: run 0th instance among SKX with 2 instance (i.e., numactl -C 0-27)
::
>>> ipexrun --ninstances 2 --instance-idx 0 python_script args
eg: run 1st instance among SKX with 2 instance (i.e., numactl -C 28-55)
::
>>> ipexrun --ninstances 2 --instance-idx 1 python_script args
eg: run 0th instance among SKX with 2 instance, 2 cores per instance, first four cores (i.e., numactl -C 0-1)
::
>>> ipexrun --cores-list "0-3" --ninstances 2 --ncores-per-instance 2 --instance-idx 0 python_script args
*** Distributed Training ***
spawns up multiple distributed training processes on each of the training nodes. For intel_extension_for_pytorch, oneCCL
is used as the communication backend and MPI used to launch multi-proc. To get the better
performance, you should specify the different cores for oneCCL communication and computation
process seperately. This tool can automatically set these ENVs(such as I_MPI_PIN_DOMIN) and launch
multi-proc for you.
The utility can be used for single-node distributed training, in which one or
more processes per node will be spawned. It can also be used in
multi-node distributed training, by spawning up multiple processes on each node
for well-improved multi-node distributed training performance as well.
1. Single-Node multi-process distributed training
::
>>> ipexrun --nnodes N python_script --arg1 --arg2 --arg3 and all other
arguments of your training script
2. Multi-Node multi-process distributed training: (e.g. two nodes)
rank 0: *(IP: 192.168.10.10, and has a free port: 29500)*
::
>>> ipexrun --nnodes 2 --nprocs-per-node=xxx
   --hostfile hostfile python_script --arg1 --arg2 --arg3
and all other arguments of your training script)
3. To look up what optional arguments this module offers:
::
>>> ipexrun --help
*** Memory allocator ***
Memory allocator plays an important role from performance perspective as well. A more efficient memory usage reduces
overhead on unnecessary memory allocations or destructions, and thus results in a faster execution. JeMalloc and
TCMalloc can be used as substitution of the default memory allocator. It is as easy as setting the
`--memory-allocator` argument to either of `auto`, `default`, `jemalloc` and `tcmalloc`. Setting it to `auto` tries
searching availability of the memory allocator in order of `tcmalloc`, 'jemalloc` and 'default`.
"""
def add_deprecated_params(parser):
    """Register all deprecated CLI arguments in a dedicated argument group.

    Every entry merely parks the old value; translation onto the replacement
    arguments happens later in ``process_deprecated_params``.
    """
    group = parser.add_argument_group("Deprecated Arguments")
    # Deprecated options that still take a value: (flag, type, default, help).
    value_options = (
        ("--nproc_per_node", int, -1, "Deprecated by --nprocs-per-node."),
        ("--more_mpi_params", str, "", "Deprecated by --extra-mpi-params."),
        ("--ncore_per_instance", int, -1, "Deprecated by --ncores-per-instance."),
        ("--node_id", int, -1, "Deprecated by --nodes-list."),
        ("--core_list", str, "", "Deprecated by --cores-list."),
    )
    for flag, value_type, default, text in value_options:
        group.add_argument(
            flag,
            metavar="\b",
            type=value_type,
            default=default,
            help=text,
        )
    # Deprecated boolean switches: (flag, help).
    switch_options = (
        ("--logical_core_for_ccl", "Deprecated by --logical-cores-for-ccl."),
        ("--enable_tcmalloc", "Deprecated by --memory-allocator."),
        ("--enable_jemalloc", "Deprecated by --memory-allocator."),
        ("--use_default_allocator", "Deprecated by --memory-allocator."),
        ("--use_logical_core", "Deprecated by --use-logical-cores."),
        ("--disable_numactl", "Deprecated by --multi-task-manager."),
        ("--disable_taskset", "Deprecated by --multi-task-manager."),
        ("--disable_iomp", "Deprecated by --omp-runtime."),
    )
    for flag, text in switch_options:
        group.add_argument(flag, action="store_true", default=False, help=text)
    group.add_argument(
        "--log_path", type=str, default="", help="Deprecated by --log-dir."
    )
    for flag in ("--multi_instance", "--distributed"):
        group.add_argument(
            flag,
            action="store_true",
            default=False,
            help="Deprecated. Will be removed.",
        )
def process_deprecated_params(args, logger):
    """Translate every deprecated CLI argument that was actually used onto its
    replacement attribute, emitting one deprecation message per argument."""
    # Straightforward renames: (old attr, "not used" sentinel, new attr,
    # replacement flag shown in the warning, optional value converter).
    renamed = (
        ("nproc_per_node", -1, "nprocs_per_node", "--nprocs-per-node", None),
        ("more_mpi_params", "", "extra_mpi_params", "--extra-mpi-params", None),
        ("ncore_per_instance", -1, "ncores_per_instance", "--ncores-per-instance", None),
        # --nodes-list expects a string, so the single node id is converted.
        ("node_id", -1, "nodes_list", "--nodes-list", str),
        ("core_list", "", "cores_list", "--cores-list", None),
        ("logical_core_for_ccl", False, "logical_cores_for_ccl", "--logical-cores-for-ccl", None),
        ("use_logical_core", False, "use_logical_cores", "--use-logical-cores", None),
        ("log_path", "", "log_dir", "--log-dir", None),
    )
    for old, sentinel, new, flag, convert in renamed:
        value = getattr(args, old)
        if value == sentinel:
            continue
        logger.warning(f"Argument --{old} is deprecated by {flag}.")
        setattr(args, new, value if convert is None else convert(value))
    if args.multi_instance:
        logger.info(
            "Argument --multi_instance is deprecated. Will be removed. \
                If you are using the deprecated argument, please update it to the new one."
        )
    if args.distributed:
        logger.info(
            "Argument --distributed is deprecated. Will be removed. \
                If you are using the deprecated argument, please update it to the new one."
        )
    allocator_flags = (
        args.enable_tcmalloc,
        args.enable_jemalloc,
        args.use_default_allocator,
    )
    if any(allocator_flags):
        logger.warning(
            "Arguments --enable_tcmalloc, --enable_jemalloc and --use_default_allocator \
            are deprecated by --memory-allocator."
        )
        # Conflicting requests fall back to automatic selection; a single
        # request is honored directly.
        if sum(bool(f) for f in allocator_flags) > 1:
            args.memory_allocator = "auto"
        elif args.enable_tcmalloc:
            args.memory_allocator = "tcmalloc"
        elif args.enable_jemalloc:
            args.memory_allocator = "jemalloc"
        else:
            args.memory_allocator = "default"
    # Disabling one task manager leaves the other one as the choice.
    if args.disable_numactl:
        logger.warning(
            "Argument --disable_numactl is deprecated by --multi-task-manager."
        )
        args.multi_task_manager = "taskset"
    if args.disable_taskset:
        logger.warning(
            "Argument --disable_taskset is deprecated by --multi-task-manager."
        )
        args.multi_task_manager = "numactl"
    if args.disable_iomp:
        logger.warning("Argument --disable_iomp is deprecated by --omp-runtime.")
        args.omp_runtime = "default"
class ArgumentTypesDefaultsHelpFormatter(argparse.HelpFormatter):
    """Help message formatter which adds default values to argument help.
    Only the name of this class is considered a public API. All the methods
    provided by the class are considered an implementation detail.
    """

    def _fill_text(self, text, width, indent):
        # Preserve the author's line breaks, only prefixing the indent.
        lines = text.splitlines(keepends=True)
        return "".join([indent + line for line in lines])

    def _split_lines(self, text, width):
        # No re-wrapping: split exactly on the explicit newlines.
        return text.splitlines()

    def _get_help_string(self, action):
        text = action.help
        # Suffixes are appended only for optionals (or positionals whose nargs
        # makes a default/type meaningful), and never twice.
        show_for_nargs = (OPTIONAL, ZERO_OR_MORE)
        eligible = bool(action.option_strings) or action.nargs in show_for_nargs
        if eligible:
            if "%(type)" not in action.help and action.type is not SUPPRESS:
                text += " (type: %(type)s)"
            if "%(default)" not in action.help and action.default is not SUPPRESS:
                text += " (default: %(default)s)"
        return text
def init_parser(parser):
    """
    Helper function parsing the command line options

    Registers the generic launcher options, the positional program/program_args
    pair, and the multi-instance, distributed, auto-ipex and deprecated
    argument groups on ``parser``.

    @retval ArgumentParser
    """
    parser.add_argument(
        "-m",
        "--module",
        default=False,
        action="store_true",
        help="Changes each process to interpret the launch script "
        'as a python module, executing with the same behavior as "python -m".',
    )
    parser.add_argument(
        "--no-python",
        "--no_python",
        default=False,
        action="store_true",
        help="Avoid applying python to execute program.",
    )
    parser.add_argument(
        "--log-dir",
        "--log_dir",
        default="",
        type=str,
        help="The log file directory. Setting it to empty disables logging to files.",
    )
    parser.add_argument(
        "--log-file-prefix",
        "--log_file_prefix",
        default="run",
        type=str,
        help="log file name prefix",
    )
    # positional
    parser.add_argument(
        "program",
        type=str,
        # Bug fix: "proram" -> "program" in the user-facing help text.
        help="Full path to the program/script to be launched. "
        "followed by all the arguments for the script",
    )
    # rest from the training program
    parser.add_argument(
        "program_args",
        nargs=argparse.REMAINDER,
    )
    launcher_distributed = DistributedTrainingLauncher()
    launcher_multi_instances = MultiInstancesLauncher()
    launcher_multi_instances.add_common_params(parser)
    launcher_multi_instances.add_params(parser)
    launcher_distributed.add_params(parser)
    auto_ipex.add_auto_ipex_params(parser)
    add_deprecated_params(parser)
    return parser
def run_main_with_args(args):
    # Entry point after argument parsing: sets up logging, sanitizes
    # LD_PRELOAD, picks the launcher (distributed when --nnodes > 0,
    # multi-instance otherwise) and runs the workload.
    if platform.system() == "Windows":
        raise RuntimeError("Windows platform is not supported!!!")
    format_str = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    logging.basicConfig(level=logging.INFO, format=format_str)
    logger = logging.getLogger(__name__)
    launcher_distributed = DistributedTrainingLauncher(logger)
    launcher_multi_instances = MultiInstancesLauncher(logger)
    # Map any deprecated flags onto their replacements before use.
    process_deprecated_params(args, logger)
    if args.log_dir:
        # Normalize the directory path, create it if needed, timestamp the log
        # prefix and also mirror launcher logs into a file.
        path = os.path.dirname(
            args.log_dir if args.log_dir.endswith("/") else f"{args.log_dir}/"
        )
        if not os.path.exists(path):
            os.makedirs(path)
        args.log_dir = path
        args.log_file_prefix = (
            f'{args.log_file_prefix}_{datetime.now().strftime("%Y%m%d%H%M%S")}'
        )
        fileHandler = logging.FileHandler(
            f"{args.log_dir}/{args.log_file_prefix}_instances.log"
        )
        logFormatter = logging.Formatter(format_str)
        fileHandler.setFormatter(logFormatter)
        logger.addHandler(fileHandler)
    assert args.no_python or args.program.endswith(
        ".py"
    ), 'For non Python script, you should use "--no-python" parameter.'
    # Snapshot the environment so variables added during launch can be logged.
    env_before = set(os.environ.keys())
    # Verify LD_PRELOAD
    if "LD_PRELOAD" in os.environ:
        # Drop LD_PRELOAD entries whose paths do not resolve to any file.
        lst_valid = []
        tmp_ldpreload = os.environ["LD_PRELOAD"]
        for item in tmp_ldpreload.split(":"):
            if item != "":
                matches = glob.glob(item)
                if len(matches) > 0:
                    lst_valid.append(item)
                else:
                    logger.warning(
                        f"{item} doesn't exist. Removing it from LD_PRELOAD."
                    )
        if len(lst_valid) > 0:
            os.environ["LD_PRELOAD"] = ":".join(lst_valid)
        else:
            os.environ["LD_PRELOAD"] = ""
    launcher = None
    if args.nnodes > 0:
        launcher = launcher_distributed
    else:
        launcher = launcher_multi_instances
    launcher.launch(args)
    # Debug-log every environment variable the launch added.
    for x in sorted(set(os.environ.keys()) - env_before):
        logger.debug(f"{x}={os.environ[x]}")
| 15,263 | 32.473684 | 126 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/cpu/autocast/_autocast_mode.py | import torch
import intel_extension_for_pytorch._C as core
import warnings
from typing import Any, Optional
from torch.types import _dtype
# Expand torch.amp.autocast_mode.autocast to support both torch.bfloat16 and torch.float16 on cpu.
class _mode_autocast(torch.amp.autocast_mode.autocast):
def __init__(
self,
device_type: str,
dtype: Optional[_dtype] = None,
enabled: bool = True,
cache_enabled: Optional[bool] = None,
):
if torch._jit_internal.is_scripting():
self._enabled = enabled
self.device = device_type
self.fast_dtype = dtype
# TODO: support get_autocast_gpu/cpu_dtype
assert dtype is not None
return
self.device = device_type
if self.device == "cuda":
self.fast_dtype = torch.get_autocast_gpu_dtype()
elif self.device == "cpu":
self.fast_dtype = torch.get_autocast_cpu_dtype()
elif self.device == "xpu":
self.fast_dtype = torch.xpu.get_autocast_xpu_dtype() # type: ignore[attr-defined]
else:
raise RuntimeError(
"User specified autocast device_type must be 'cuda' or 'cpu'"
)
self._cache_enabled = torch.is_autocast_cache_enabled()
if (
enabled
and self.device == "cuda"
and torch.cuda.amp.common.amp_definitely_not_available()
):
warnings.warn(
"User provided device_type of 'cuda', but CUDA is not available. Disabling"
)
enabled = False
if dtype is not None:
self.fast_dtype = dtype
if cache_enabled is not None:
self._cache_enabled = cache_enabled
if self.device == "cpu":
supported_dtype = [torch.bfloat16, torch.float16]
if self.fast_dtype not in supported_dtype:
error_message = "In CPU autocast, but the target dtype is not supported. Disabling autocast.\n"
error_message += "CPU Autocast only supports dtype of torch.bfloat16 and torch.float16 currently."
warnings.warn(error_message)
enabled = False
if self.device == "xpu":
supported_dtype = [torch.bfloat16, torch.float16]
if self.fast_dtype not in supported_dtype:
error_message = "In XPU autocast, but the target dtype is not supported. Disabling autocast.\n"
error_message += (
"XPU Autocast only supports dtype of torch.bfloat16 currently."
)
warnings.warn(error_message)
enabled = False
if self.device == "cuda":
if self.fast_dtype == torch.bfloat16 and not torch.cuda.is_bf16_supported():
raise RuntimeError(
"Current CUDA Device does not support bfloat16. Please switch dtype to float16."
)
self._enabled = enabled
# same as torch.cpu.amp.autocast
class autocast_cpu(_mode_autocast):
    r"""
    See :class:`torch.autocast`.
    ``torch.cpu.amp.autocast(args...)`` is equivalent to ``torch.autocast("cpu", args...)``
    """

    def __init__(
        self,
        enabled: bool = True,
        dtype: torch.dtype = torch.bfloat16,
        cache_enabled: bool = True,
    ):
        # Under TorchScript, bypass the parent constructor entirely and just
        # record the requested state.
        if torch._jit_internal.is_scripting():
            self._enabled = enabled
            self.device = "cpu"
            self.fast_dtype = dtype
            return
        super().__init__(
            "cpu", enabled=enabled, dtype=dtype, cache_enabled=cache_enabled
        )

    def __enter__(self):
        # TorchScript cannot run the stateful context-manager protocol.
        return self if torch._jit_internal.is_scripting() else super().__enter__()

    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any):  # type: ignore[override]
        if not torch._jit_internal.is_scripting():
            return super().__exit__(exc_type, exc_val, exc_tb)
        return None

    def __call__(self, func):
        # Decorator form: a no-op while scripting.
        return func if torch._jit_internal.is_scripting() else super().__call__(func)
# Expand torch.cpu.amp.autocast to support both torch.bfloat16 & torch.half
# and support the disabling of cache_enabled for autocast within jit.trace.
class _autocast(autocast_cpu):
    # Re-implements the context-manager protocol so that, besides toggling
    # PyTorch's CPU autocast state, the IPEX core autocast dtype is kept in
    # sync, and both autocast caches are cleared when the outermost region exits.
    def __enter__(self):
        # Save the current state so __exit__ can restore it (supports nesting).
        self.prev_cache_enabled = torch.is_autocast_cache_enabled()
        self.prev = torch.is_autocast_cpu_enabled()
        self.prev_fast_dtype = core.get_autocast_dtype()
        # Apply this region's settings to both PyTorch and the IPEX core.
        torch.set_autocast_cpu_enabled(self._enabled)
        core.set_autocast_dtype(self.fast_dtype)
        torch.autocast_increment_nesting()
        torch.set_autocast_cache_enabled(self._cache_enabled)
    def __exit__(self, *args):
        # Drop the cache when we exit to a nesting level that's outside any instance of autocast.
        if torch.autocast_decrement_nesting() == 0:
            core.clear_autocast_cache()
            torch.clear_autocast_cache()
        # Restore the state captured in __enter__.
        torch.set_autocast_cpu_enabled(self.prev)
        core.set_autocast_dtype(self.prev_fast_dtype)
        torch.set_autocast_cache_enabled(self.prev_cache_enabled)
        # Never suppress exceptions raised inside the region.
        return False
# Route torch.cpu.amp.autocast through the extended implementation above so
# that float16 is accepted alongside bfloat16 on CPU builds.
if core._has_cpu():
    torch.cpu.amp.autocast = _autocast
| 5,322 | 37.572464 | 114 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/cpu/autocast/_grad_scaler.py | # Copy grad scaler from PyTorch for fp16 on CPU
import torch
from collections import defaultdict, abc
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple
import intel_extension_for_pytorch._C as core
class _MultiDeviceReplicator(object):
"""
Lazily serves copies of a tensor to requested devices. Copies are cached per-device.
"""
def __init__(self, master_tensor: torch.Tensor) -> None:
self.master = master_tensor
self._per_device_tensors: Dict[torch.device, torch.Tensor] = {}
def get(self, device) -> torch.Tensor:
retval = self._per_device_tensors.get(device, None)
if retval is None:
retval = self.master.to(device=device, non_blocking=True, copy=True)
self._per_device_tensors[device] = retval
return retval
# Defines default_factory for GradScaler's _per_optimizer_states defaultdict,
# as well as associated "enum" values. Prefers defining these at top level because
# - Lambdas can't be pickled, so we don't want to supply a lambda as the factory.
# - Defining READY, UNSCALED, STEPPED and _refresh_per_optimizer_state within GradScaler
# causes a circular reference, which we'd rather avoid.
class OptState(Enum):
READY = 0
UNSCALED = 1
STEPPED = 2
def _refresh_per_optimizer_state():
return {"stage": OptState.READY, "found_inf_per_device": {}}
class GradScaler(object):
_scale: Optional[torch.Tensor]
_grows_tracker: Optional[torch.Tensor]
_per_optimizer_states: Dict[int, Dict[str, Any]]
"""
An instance ``scaler`` of :class:`GradScaler` helps perform the steps of gradient scaling
conveniently.
* ``scaler.scale(loss)`` multiplies a given loss by ``scaler``'s current scale factor.
* ``scaler.step(optimizer)`` safely unscales gradients and calls ``optimizer.step()``.
* ``scaler.update()`` updates ``scaler``'s scale factor.
Example::
# Creates a GradScaler once at the beginning of training.
scaler = GradScaler()
for epoch in epochs:
for input, target in data:
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
# Scales loss. Calls backward() on scaled loss to create scaled gradients.
scaler.scale(loss).backward()
# scaler.step() first unscales gradients of the optimizer's params.
# If gradients don't contain infs/NaNs, optimizer.step() is then called,
# otherwise, optimizer.step() is skipped.
scaler.step(optimizer)
# Updates the scale for next iteration.
scaler.update()
See the :ref:`Automatic Mixed Precision examples<amp-examples>` for usage
(along with autocasting) in more complex cases like gradient clipping, gradient accumulation, gradient penalty,
and multiple losses/optimizers.
``scaler`` dynamically estimates the scale factor each iteration. To minimize gradient underflow,
a large scale factor should be used. However, ``float16`` values can "overflow" (become inf or NaN) if
the scale factor is too large. Therefore, the optimal scale factor is the largest factor that can be used
without incurring inf or NaN gradient values.
``scaler`` approximates the optimal scale factor over time by checking the gradients for infs and NaNs during every
``scaler.step(optimizer)`` (or optional separate ``scaler.unscale_(optimizer)``, see :meth:`unscale_`).
* If infs/NaNs are found, ``scaler.step(optimizer)`` skips the underlying ``optimizer.step()`` (so the params
themselves remain uncorrupted) and ``update()`` multiplies the scale by ``backoff_factor``.
* If no infs/NaNs are found, ``scaler.step(optimizer)`` runs the underlying ``optimizer.step()`` as usual.
If ``growth_interval`` unskipped iterations occur consecutively, ``update()`` multiplies the scale by
``growth_factor``.
The scale factor often causes infs/NaNs to appear in gradients for the first few iterations as its
value calibrates. ``scaler.step`` will skip the underlying ``optimizer.step()`` for these
iterations. After that, step skipping should occur rarely (once every few hundred or thousand iterations).
Args:
init_scale (float, optional, default=2.**16): Initial scale factor.
growth_factor (float, optional, default=2.0): Factor by which the scale is multiplied during
:meth:`update` if no inf/NaN gradients occur for ``growth_interval`` consecutive iterations.
backoff_factor (float, optional, default=0.5): Factor by which the scale is multiplied during
:meth:`update` if inf/NaN gradients occur in an iteration.
growth_interval (int, optional, default=2000): Number of consecutive iterations without inf/NaN gradients
that must occur for the scale to be multiplied by ``growth_factor``.
enabled (bool, optional, default=True): If ``False``, disables gradient scaling. :meth:`step` simply
invokes the underlying ``optimizer.step()``, and other methods become no-ops.
"""
def __init__(
self,
init_scale=2.0**16,
growth_factor=2.0,
backoff_factor=0.5,
growth_interval=2000,
enabled=True,
):
self._enabled = enabled
if self._enabled:
assert growth_factor > 1.0, "The growth factor must be > 1.0."
assert backoff_factor < 1.0, "The backoff factor must be < 1.0."
self._init_scale = init_scale
# self._scale will be lazily initialized during the first call to scale()
self._scale = None
self._growth_factor = growth_factor
self._backoff_factor = backoff_factor
self._growth_interval = growth_interval
self._init_growth_tracker = 0
# self._growth_tracker will be lazily initialized during the first call to scale()
self._growth_tracker = None
self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state)
def _check_scale_growth_tracker(
self, funcname
) -> Tuple[torch.Tensor, torch.Tensor]:
fix = "This may indicate your script did not use scaler.scale(loss or outputs) earlier in the iteration."
assert self._scale is not None, (
"Attempted {} but _scale is None. ".format(funcname) + fix
)
assert self._growth_tracker is not None, (
"Attempted {} but _growth_tracker is None. ".format(funcname) + fix
)
return (self._scale, self._growth_tracker)
def _lazy_init_scale_growth_tracker(self, dev):
assert self._growth_tracker is None, "_growth_tracker initialized before _scale"
self._scale = torch.full(
(1,), self._init_scale, dtype=torch.float32, device=dev
)
self._growth_tracker = torch.full(
(1,), self._init_growth_tracker, dtype=torch.int32, device=dev
)
    def scale(self, outputs):
        """
        Multiplies ('scales') a tensor or list of tensors by the scale factor.

        Returns scaled outputs. If this instance of :class:`GradScaler` is not enabled, outputs are returned
        unmodified.

        Args:
            outputs (Tensor or iterable of Tensors): Outputs to scale.
        """
        if not self._enabled:
            return outputs
        # Short-circuit for the common case.
        if isinstance(outputs, torch.Tensor):
            if self._scale is None:
                # First scale() of the run: create the scale tensor on the same
                # device as the output being scaled.
                self._lazy_init_scale_growth_tracker(outputs.device)
            assert self._scale is not None
            return outputs * self._scale.to(device=outputs.device, non_blocking=True)
        # Invoke the more complex machinery only if we're treating multiple outputs.
        # stash holds at most one replicator so every recursive apply_scale call
        # shares a single per-device cache of the scale tensor.
        stash: List[
            _MultiDeviceReplicator
        ] = []  # holds a reference that can be overwritten by apply_scale
        def apply_scale(val):
            # Recursively walk nested iterables, scaling every tensor found.
            if isinstance(val, torch.Tensor):
                if len(stash) == 0:
                    if self._scale is None:
                        self._lazy_init_scale_growth_tracker(val.device)
                    assert self._scale is not None
                    stash.append(_MultiDeviceReplicator(self._scale))
                return val * stash[0].get(val.device)
            elif isinstance(val, abc.Iterable):
                iterable = map(apply_scale, val)
                if isinstance(val, list) or isinstance(val, tuple):
                    return type(val)(iterable)
                else:
                    # NOTE: for a generic iterable this returns a *lazy* map
                    # object; scaling happens when the caller consumes it.
                    return iterable
            else:
                raise ValueError("outputs must be a Tensor or an iterable of Tensors")
        return apply_scale(outputs)
    def _unscale_grads_(self, optimizer, inv_scale, found_inf, allow_fp16):
        """Multiply all of ``optimizer``'s grads by ``inv_scale`` in place, recording infs/NaNs.

        Args:
            optimizer: optimizer whose param grads are unscaled.
            inv_scale (Tensor): 1-element reciprocal of the current scale.
            found_inf (Tensor): 1-element fp32 tensor set nonzero if any inf/NaN is seen.
            allow_fp16 (bool): if False, raise on fp16 grads (unscaling them in
                fp16 risks overflow/precision loss).

        Returns:
            dict mapping device -> per-device found_inf tensor.
        """
        per_device_inv_scale = _MultiDeviceReplicator(inv_scale)
        per_device_found_inf = _MultiDeviceReplicator(found_inf)
        # To set up _amp_foreach_non_finite_check_and_unscale_, split grads by device and dtype.
        # There could be hundreds of grads, so we'd like to iterate through them just once.
        # However, we don't know their devices or dtypes in advance.
        # https://stackoverflow.com/questions/5029934/defaultdict-of-defaultdict
        # Google says mypy struggles with defaultdicts type annotations.
        per_device_and_dtype_grads = defaultdict(lambda: defaultdict(list)) # type: ignore[var-annotated]
        # sync grad to master weight (IPEX split-sgd style optimizers expose sync_grad)
        if hasattr(optimizer, "sync_grad"):
            optimizer.sync_grad()
        with torch.no_grad():
            for group in optimizer.param_groups:
                for param in group["params"]:
                    if param.grad is None:
                        continue
                    if (not allow_fp16) and param.grad.dtype == torch.float16:
                        raise ValueError("Attempting to unscale FP16 gradients.")
                    if param.grad.is_sparse:
                        # is_coalesced() == False means the sparse grad has values with duplicate indices.
                        # coalesce() deduplicates indices and adds all values that have the same index.
                        # For scaled fp16 values, there's a good chance coalescing will cause overflow,
                        # so we should check the coalesced _values().
                        if param.grad.dtype is torch.float16:
                            param.grad = param.grad.coalesce()
                        to_unscale = param.grad._values()
                    else:
                        to_unscale = param.grad
                    # TODO: is there a way to split by device and dtype without appending in the inner loop?
                    per_device_and_dtype_grads[to_unscale.device][
                        to_unscale.dtype
                    ].append(to_unscale)
            # One fused check-and-unscale call per (device, dtype) bucket.
            for device, per_dtype_grads in per_device_and_dtype_grads.items():
                for grads in per_dtype_grads.values():
                    core._amp_foreach_non_finite_check_and_unscale_(
                        grads,
                        per_device_found_inf.get(device),
                        per_device_inv_scale.get(device),
                    )
        return per_device_found_inf._per_device_tensors
    def unscale_(self, optimizer):
        """
        Divides ("unscales") the optimizer's gradient tensors by the scale factor.

        :meth:`unscale_` is optional, serving cases where you need to
        :ref:`modify or inspect gradients<working-with-unscaled-gradients>`
        between the backward pass(es) and :meth:`step`.
        If :meth:`unscale_` is not called explicitly, gradients will be unscaled automatically during :meth:`step`.

        Simple example, using :meth:`unscale_` to enable clipping of unscaled gradients::

            ...
            scaler.scale(loss).backward()
            scaler.unscale_(optimizer)
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
            scaler.step(optimizer)
            scaler.update()

        Args:
            optimizer (torch.optim.Optimizer): Optimizer that owns the gradients to be unscaled.

        .. warning::
            :meth:`unscale_` should only be called once per optimizer per :meth:`step` call,
            and only after all gradients for that optimizer's assigned parameters have been accumulated.
            Calling :meth:`unscale_` twice for a given optimizer between each :meth:`step` triggers a RuntimeError.

        .. warning::
            :meth:`unscale_` may unscale sparse gradients out of place, replacing the ``.grad`` attribute.
        """
        if not self._enabled:
            return
        self._check_scale_growth_tracker("unscale_")
        # Per-optimizer stage tracking (READY -> UNSCALED -> STEPPED) guards
        # against double-unscaling within one iteration.
        optimizer_state = self._per_optimizer_states[id(optimizer)]
        if optimizer_state["stage"] is OptState.UNSCALED:
            raise RuntimeError(
                "unscale_() has already been called on this optimizer since the last update()."
            )
        elif optimizer_state["stage"] is OptState.STEPPED:
            raise RuntimeError("unscale_() is being called after step().")
        # FP32 division can be imprecise for certain compile options, so we carry out the reciprocal in FP64.
        assert self._scale is not None
        inv_scale = self._scale.double().reciprocal().float()
        found_inf = torch.full(
            (1,), 0.0, dtype=torch.float32, device=self._scale.device
        )
        # allow_fp16=False: user-visible unscaling of fp16 grads is rejected.
        optimizer_state["found_inf_per_device"] = self._unscale_grads_(
            optimizer, inv_scale, found_inf, False
        )
        optimizer_state["stage"] = OptState.UNSCALED
def _maybe_opt_step(self, optimizer, optimizer_state, *args, **kwargs):
retval = None
if not sum(v.item() for v in optimizer_state["found_inf_per_device"].values()):
if hasattr(optimizer, "step_sync_weight"):
retval = optimizer.step_sync_weight(*args, **kwargs)
else:
retval = optimizer.step(*args, **kwargs)
return retval
    def step(self, optimizer, *args, **kwargs):
        """
        :meth:`step` carries out the following two operations:

        1. Internally invokes ``unscale_(optimizer)`` (unless :meth:`unscale_` was explicitly called for ``optimizer``
        earlier in the iteration). As part of the :meth:`unscale_`, gradients are checked for infs/NaNs.
        2. If no inf/NaN gradients are found, invokes ``optimizer.step()`` using the unscaled
        gradients. Otherwise, ``optimizer.step()`` is skipped to avoid corrupting the params.

        ``*args`` and ``**kwargs`` are forwarded to ``optimizer.step()``.
        Returns the return value of ``optimizer.step(*args, **kwargs)``.

        Args:
            optimizer (torch.optim.Optimizer): Optimizer that applies the gradients.
            args: Any arguments.
            kwargs: Any keyword arguments.

        .. warning::
            Closure use is not currently supported.
        """
        if not self._enabled:
            return optimizer.step(*args, **kwargs)
        if "closure" in kwargs:
            raise RuntimeError(
                "Closure use is not currently supported if GradScaler is enabled."
            )
        self._check_scale_growth_tracker("step")
        optimizer_state = self._per_optimizer_states[id(optimizer)]
        if optimizer_state["stage"] is OptState.STEPPED:
            raise RuntimeError(
                "step() has already been called since the last update()."
            )
        retval = None
        if (
            hasattr(optimizer, "_step_supports_amp_scaling")
            and optimizer._step_supports_amp_scaling
        ):
            # This optimizer has customized scale-handling logic, so we can call optimizer.step() directly.
            # The contract with custom optimizers is that their step() should accept an additional,
            # optional grad_scaler kwarg. We append self to the kwargs so the custom optimizer has full information:
            # it can query its own state, invoke unscale_ on itself, etc
            retval = optimizer.step(*args, **dict(kwargs, grad_scaler=self))
            optimizer_state["stage"] = OptState.STEPPED
            return retval
        if optimizer_state["stage"] is OptState.READY:
            # Not yet unscaled this iteration: unscale_ also records
            # found_inf_per_device, which _maybe_opt_step consults below.
            self.unscale_(optimizer)
        assert (
            len(optimizer_state["found_inf_per_device"]) > 0
        ), "No inf checks were recorded for this optimizer."
        retval = self._maybe_opt_step(optimizer, optimizer_state, *args, **kwargs)
        optimizer_state["stage"] = OptState.STEPPED
        return retval
    def update(self, new_scale=None):
        """
        Updates the scale factor.

        If any optimizer steps were skipped the scale is multiplied by ``backoff_factor``
        to reduce it. If ``growth_interval`` unskipped iterations occurred consecutively,
        the scale is multiplied by ``growth_factor`` to increase it.

        Passing ``new_scale`` sets the new scale value manually. (``new_scale`` is not
        used directly, it's used to fill GradScaler's internal scale tensor. So if
        ``new_scale`` was a tensor, later in-place changes to that tensor will not further
        affect the scale GradScaler uses internally.)

        Args:
            new_scale (float or :class:`torch.FloatTensor`, optional, default=None): New scale factor.

        .. warning::
            :meth:`update` should only be called at the end of the iteration, after ``scaler.step(optimizer)`` has
            been invoked for all optimizers used this iteration.
        """
        if not self._enabled:
            return
        _scale, _growth_tracker = self._check_scale_growth_tracker("update")
        if new_scale is not None:
            # Accept a new user-defined scale.
            if isinstance(new_scale, float):
                self._scale.fill_(new_scale)  # type: ignore[union-attr]
            else:
                reason = "new_scale should be a float or a 1-element torch.FloatTensor with requires_grad=False."
                assert isinstance(new_scale, torch.FloatTensor), reason  # type: ignore[attr-defined]
                assert new_scale.numel() == 1, reason
                assert new_scale.requires_grad is False, reason
                self._scale.copy_(new_scale)  # type: ignore[union-attr]
        else:
            # Consume shared inf/nan data collected from optimizers to update the scale.
            # If all found_inf tensors are on the same device as self._scale, this operation is asynchronous.
            found_infs = [
                found_inf.to(device=_scale.device, non_blocking=True)
                for state in self._per_optimizer_states.values()
                for found_inf in state["found_inf_per_device"].values()
            ]
            assert len(found_infs) > 0, "No inf checks were recorded prior to update."
            # Fold all per-optimizer/per-device flags into a single tensor.
            found_inf_combined = found_infs[0]
            if len(found_infs) > 1:
                for i in range(1, len(found_infs)):
                    found_inf_combined += found_infs[i]
            # Native kernel applies backoff/growth and resets the tracker.
            core._amp_update_scale_(
                _scale,
                _growth_tracker,
                found_inf_combined,
                self._growth_factor,
                self._backoff_factor,
                self._growth_interval,
            )
        # To prepare for next iteration, clear the data collected from optimizers this iteration.
        self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state)
def get_scale(self):
"""
Returns a Python float containing the current scale, or 1.0 if scaling is disabled.
"""
if self._enabled:
return self._init_scale if self._scale is None else self._scale
else:
return 1.0
def get_growth_factor(self):
r"""
Returns a Python float containing the scale growth factor.
"""
return self._growth_factor
    def set_growth_factor(self, new_factor):
        r"""
        Args:
            new_factor (float): Value to use as the new scale growth factor.
        """
        self._growth_factor = new_factor
def get_backoff_factor(self):
r"""
Returns a Python float containing the scale backoff factor.
"""
return self._backoff_factor
    def set_backoff_factor(self, new_factor):
        r"""
        Args:
            new_factor (float): Value to use as the new scale backoff factor.
        """
        self._backoff_factor = new_factor
def get_growth_interval(self):
r"""
Returns a Python int containing the growth interval.
"""
return self._growth_interval
def set_growth_interval(self, new_interval):
r"""
Args:
new_interval (int): Value to use as the new growth interval.
"""
self._growth_interval = new_interval
def _get_growth_tracker(self):
if self._enabled:
return (
self._init_growth_tracker
if self._growth_tracker is None
else self._growth_tracker.item()
)
else:
return 0
def is_enabled(self):
r"""
Returns a bool indicating whether this instance is enabled.
"""
return self._enabled
def state_dict(self):
r"""
Returns the state of the scaler as a :class:`dict`. It contains five entries:
* ``"scale"`` - a Python float containing the current scale
* ``"growth_factor"`` - a Python float containing the current growth factor
* ``"backoff_factor"`` - a Python float containing the current backoff factor
* ``"growth_interval"`` - a Python int containing the current growth interval
* ``"_growth_tracker"`` - a Python int containing the number of recent consecutive unskipped steps.
If this instance is not enabled, returns an empty dict.
.. note::
If you wish to checkpoint the scaler's state after a particular iteration, :meth:`state_dict`
should be called after :meth:`update`.
"""
return (
{
"scale": self.get_scale(),
"growth_factor": self._growth_factor,
"backoff_factor": self._backoff_factor,
"growth_interval": self._growth_interval,
"_growth_tracker": self._get_growth_tracker(),
}
if self._enabled
else {}
)
def __getstate__(self):
state = self.__dict__.copy()
if self._enabled:
assert len(self._per_optimizer_states) == 0, (
"A GradScaler instance may only be pickled at the beginning "
"of an iteration, or at the end after scaler.update()."
)
# Pickling _scale and _growth_tracker Tensors directly triggers
# "warnings.warn("pickle support for Storage will be removed in 1.5..."
# so instead, we set the unpickled instance up to reinitialize them lazily.
state["_init_scale"] = self.get_scale()
state["_init_growth_tracker"] = self._get_growth_tracker()
state["_scale"] = None
state["_growth_tracker"] = None
return state
def __setstate__(self, state):
self.__dict__.update(state)
def _check_inf_per_device(self, optimizer):
_scale, _ = self._check_scale_growth_tracker("_check_inf_per_device")
dummy_inv_scale = torch.full(
(1,), 1.0, dtype=torch.float32, device=_scale.device
)
found_inf = torch.full((1,), 0.0, dtype=torch.float32, device=_scale.device)
self._per_optimizer_states[id(optimizer)][
"found_inf_per_device"
] = self._unscale_grads_(optimizer, dummy_inv_scale, found_inf, True)
return self._per_optimizer_states[id(optimizer)]["found_inf_per_device"]
def _found_inf_per_device(self, optimizer):
return self._per_optimizer_states[id(optimizer)]["found_inf_per_device"]
torch.cpu.amp.GradScaler = GradScaler
| 24,298 | 44.333955 | 119 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/cpu/runtime/multi_stream.py | import torch
import torch.nn as nn
from typing import Union
import intel_extension_for_pytorch._C as core
from .cpupool import CPUPool
from .task import Task
import copy
import warnings
class MultiStreamModuleHint(object):
    r"""
    Describes, per argument, how MultiStreamModule should split its inputs or
    concatenate its outputs. Each entry is ``None`` (leave the argument
    untouched), an ``int`` (the dim along which to split/concat), or a nested
    container of such entries mirroring the argument's structure, e.g.
    ``(0, None, ...)`` or ``[0, None, ...]``.

    Args:
        *args: Variable length argument list.
        **kwargs: Arbitrary keyword arguments.

    Returns:
        intel_extension_for_pytorch.cpu.runtime.MultiStreamModuleHint: Generated
        intel_extension_for_pytorch.cpu.runtime.MultiStreamModuleHint object.

    :meta public:
    """

    def __init__(self, *args, **kwargs):
        # Positional hints are kept as a mutable list so per-stream input
        # containers cloned from them can be edited in place.
        self.args = list(args)
        self.kwargs = kwargs
        self.args_len = len(args)
        self.kwargs_len = len(kwargs)
# By default, inputs are split and outputs are concatenated along dim 0
# (the batch dim) unless the user supplies custom hints.
default_multi_stream_module_split_hint = MultiStreamModuleHint(0)
default_multi_stream_module_concat_hint = MultiStreamModuleHint(0)
def get_default_num_streams(cpu_pool):
    """Heuristic default for "AUTO": one stream per core in ``cpu_pool``.

    One core per stream usually brings better overall throughput than other
    configurations.
    """
    return len(cpu_pool.core_ids)
class MultiStreamModule(nn.Module):
r"""
MultiStreamModule supports inference with multi-stream throughput mode.
If the number of cores inside ``cpu_pool`` is divisible by ``num_streams``,
the cores will be allocated equally to each stream. If the number of cores
inside ``cpu_pool`` is not divisible by ``num_streams`` with remainder N,
one extra core will be allocated to the first N streams. We suggest to set
the ``num_streams`` as divisor of core number inside ``cpu_pool``.
If the inputs' batchsize is larger than and divisible by ``num_streams``,
the batchsize will be allocated equally to each stream. If batchsize is not
divisible by ``num_streams`` with remainder N, one extra piece will be
allocated to the first N streams. If the inputs' batchsize is less than
``num_streams``, only the first batchsize's streams are used with mini batch
as one. We suggest to set inputs' batchsize larger than and divisible by
``num_streams``. If you don't want to tune the num of streams and leave it
as "AUTO", we suggest to set inputs' batchsize larger than and divisible by
number of cores.
Args:
model (torch.jit.ScriptModule or torch.nn.Module): The input model.
num_streams (Union[int, str]): Number of instances (int) or "AUTO" (str). "AUTO" means the stream number
will be selected automatically. Although "AUTO" usually provides a
reasonable performance, it may still not be optimal for some cases which
means manual tuning for number of streams is needed for this case.
cpu_pool (intel_extension_for_pytorch.cpu.runtime.CPUPool): An
intel_extension_for_pytorch.cpu.runtime.CPUPool object, contains
all CPU cores used to run multi-stream inference.
concat_output (bool): A flag indicates whether the output of each
stream will be concatenated or not. The default value is True. Note:
if the output of each stream can't be concatenated, set this flag to
false to get the raw output (a list of each stream's output).
input_split_hint (MultiStreamModuleHint): Hint to MultiStreamModule about
how to split the inputs.
output_concat_hint (MultiStreamModuleHint): Hint to MultiStreamModule about
how to concat the outputs.
Returns:
intel_extension_for_pytorch.cpu.runtime.MultiStreamModule: Generated
intel_extension_for_pytorch.cpu.runtime.MultiStreamModule object.
:meta public:
"""
def __init__(
self,
model,
num_streams: Union[int, str] = "AUTO",
cpu_pool: CPUPool = CPUPool(),
concat_output: bool = True,
input_split_hint: MultiStreamModuleHint = default_multi_stream_module_split_hint,
output_concat_hint: MultiStreamModuleHint = default_multi_stream_module_concat_hint,
):
super(MultiStreamModule, self).__init__()
assert (
type(cpu_pool) is CPUPool
), "Input of cpu_pool must be provided with type of ipex.cpu.runtime.CPUPool"
if not isinstance(model, torch.jit.ScriptModule):
warnings.warn(
"Creating MultiStreamModule on an nn.Module. This can be slow due "
"to Python Global Interpreter Lock (GIL). Suggest to use JIT ScriptModule for better performance."
)
self.cpu_pool = cpu_pool
self.core_list = cpu_pool.core_ids
if isinstance(num_streams, str):
# For str input of num_streams, it must be "auto"
if num_streams.upper() == "AUTO":
self.num_streams = get_default_num_streams(
cpu_pool
) # The default selected value when auto selection is on.
else:
AssertionError(
False
), 'Input of num_streams must be Number of instances or string "AUTO"'
else:
assert isinstance(
num_streams, int
), 'Input of num_streams must be Number of instances or string "auto"'
self.num_streams = num_streams
if self.num_streams > self.core_list.__len__():
self.num_streams = self.core_list.__len__()
warnings.warn(
"The number of streams is larger than number of cores. The number of streams changes to {}.".format(
self.num_streams
)
)
if self.num_streams == 1:
# Sync execution path if num_stream is 1.
self.model = model
else:
self.cores_per_instance = self.core_list.__len__() // self.num_streams
num_stream_allocated_extra_core = (
self.core_list.__len__() % self.num_streams
)
self.tasks = []
start_core_list_idx = 0
end_core_list_idx = 0
for j in range(self.num_streams):
if j < num_stream_allocated_extra_core:
# If the core number is not divisible by stream number,
# the remainder streams(num_stream_allocated_extra_core) will be allocated one extra core.
end_core_list_idx += self.cores_per_instance + 1
else:
end_core_list_idx += self.cores_per_instance
self.tasks.append(
Task(
model,
CPUPool(self.core_list[start_core_list_idx:end_core_list_idx]),
)
)
start_core_list_idx = end_core_list_idx
self.concat_output = concat_output
self.input_split_hint = input_split_hint
self.output_concat_hint = output_concat_hint
# Deep copy the input structure for each stream based on input_split_hint.
# Each streams_input will be recursively visited and set to the split value in place.
self.args_streams_input = []
self.kwargs_streams_input = []
for _ in range(self.num_streams):
self.args_streams_input.append(copy.deepcopy(self.input_split_hint.args))
self.kwargs_streams_input.append(
copy.deepcopy(self.input_split_hint.kwargs)
)
# Deep copy the output structure based on output_concat_hint.
# self.output will be recursively visited and set to the concat value in place.
self.output = copy.deepcopy(self.output_concat_hint)
# Init status needed for forward
self.reset_forward_status()
def reset_forward_status(self):
# Since the input batchsize for each forward invoking may change
# Need to reset the status for each forward invoking
# * split_size: will be calculated by input batch size and num_streams.
# * used_num_streams: is the num_streams actually used by this forward invoking.
# It may less than self.num_streams when bs is less than self.num_streams.
# * current_split_start_idx: used to record the split start idx for current stream.
# * current_split_end_idx: used to record the split end idx for current stream.
self.split_size = None
self.used_num_streams = self.num_streams
self.current_split_start_idx = 0
self.current_split_end_idx = 0
def update_split_idx(self, stream_id):
# Set current_split_start_idx to last current_split_end_idx
self.current_split_start_idx = self.current_split_end_idx
# Calculate current_split_end_idx to new value
if stream_id < self.instance_need_extra_input:
# Tail case, when the input image size larger than num_streams and not divisible,
# the first remainder streams will have (mini_batch + 1) input size.
self.current_split_end_idx = self.current_split_end_idx + (
self.batch_per_instance + 1
)
else:
# Input image size divisible of num_streams or input image size less than num_streams.
self.current_split_end_idx = (
self.current_split_end_idx + self.batch_per_instance
)
def init_forward_status(self, split_size, stream_id):
# This function should be invoke only once at each forward
self.split_size = split_size
# Ensure each instance has input offload
self.batch_per_instance = self.split_size // self.num_streams
if self.batch_per_instance >= 1:
# The input batchsize larger or equal to num_streams.
self.used_num_streams = self.num_streams
# If input batchsize larger than num_streams and not divisible,
# the first remainder streams will have (mini_batch + 1) input size.
self.instance_need_extra_input = self.split_size % self.num_streams
else:
# The input batchsize less than num_streams,
# only the first batchsize stream will have mini_batch(1) input.
self.batch_per_instance = 1
self.used_num_streams = self.split_size
self.instance_need_extra_input = 0
self.update_split_idx(stream_id)
    def _do_get_input_for_each_stream(
        self, hint_object, input_object, stream_input_object, idx_or_key, stream_id
    ):
        """Recursively fill one stream's input container from the raw input.

        * hint_object: input hint telling whether the corresponding entry of
          input_object must be split at the current position.
        * input_object: raw input used to split and generate stream_input_object
          at the current position.
        * stream_input_object: generated in place as the input for the stream
          marked by stream_id.
        * idx_or_key: idx (for list/tuple) and key (for dict) used for recursive
          visit of hint_object/input_object/stream_input_object.
        * stream_id: the stream we are visiting now.
        """
        type_arg = type(hint_object[idx_or_key])
        # NOTE(review): the list branch below is a bare `if` (not `elif`), so a
        # list-typed hint also falls through to the trailing `else`; because
        # that `else` only *constructs* an AssertionError (never raises it),
        # this is a harmless no-op today, but the chain should be elif + raise.
        if type_arg in [list]:
            for i in range(hint_object[idx_or_key].__len__()):
                self._do_get_input_for_each_stream(
                    hint_object[idx_or_key],
                    input_object[idx_or_key],
                    stream_input_object[idx_or_key],
                    i,
                    stream_id,
                )
        if type_arg in [tuple]:
            # Tuple doesn't support item change in place
            # So we change it to list for next recursion and change it back to tuple.
            temp = list(stream_input_object[idx_or_key])
            for i in range(hint_object[idx_or_key].__len__()):
                self._do_get_input_for_each_stream(
                    hint_object[idx_or_key],
                    input_object[idx_or_key],
                    temp,
                    i,
                    stream_id,
                )
            stream_input_object[idx_or_key] = tuple(temp)
        elif type_arg in [dict]:
            for key in hint_object[idx_or_key]:
                self._do_get_input_for_each_stream(
                    hint_object[idx_or_key],
                    input_object[idx_or_key],
                    stream_input_object[idx_or_key],
                    key,
                    stream_id,
                )
        elif (type_arg is int) or (hint_object[idx_or_key] is None):
            if hint_object[idx_or_key] is not None:
                # If user tells us to split in this object,
                if self.split_size is None:
                    # Init the input status for each stream here
                    # Here the stream_id must be 0
                    self.init_forward_status(
                        input_object[idx_or_key].size(hint_object[idx_or_key]),
                        stream_id,
                    )
                # Get the split input for each stream
                # Here we assume split along the outside dim, otherwise memory copy happens and obviously \
                # hurt multi stream module's performance.
                if hint_object[idx_or_key] == 0:
                    # Split along dim 0, the slice will not create new tensor
                    stream_input_object[idx_or_key] = input_object[idx_or_key][
                        self.current_split_start_idx : self.current_split_end_idx
                    ]
                else:
                    # Otherwise, we use torch.narrow
                    length = self.current_split_end_idx - self.current_split_start_idx
                    stream_input_object[idx_or_key] = input_object[idx_or_key].narrow(
                        hint_object[idx_or_key], self.current_split_start_idx, length
                    )
            else:
                # This object shouldn't be split, just set it as each stream's input
                stream_input_object[idx_or_key] = input_object[idx_or_key]
        else:
            # NOTE(review): this expression builds a tuple containing an
            # AssertionError but never raises it — unsupported hint types are
            # silently ignored. Should be `raise AssertionError(...)`.
            AssertionError(
                False
            ), "Generate stream input failed, unsupport input hint type of:{}".format(
                type_arg
            )
        return None
    def _get_input_for_each_stream(
        self, multi_stream_module_split_hint, *args, **kwargs
    ):
        """Split ``*args``/``**kwargs`` into per-stream input containers.

        The first recursion (stream 0) also initializes the split bookkeeping
        as a side effect of _do_get_input_for_each_stream; only then is the
        number of streams actually used known.
        """
        # recursive once to init:
        # 1. Decide the actual self.used_num_streams (it may less than number stream when input bs is small)
        # 2. Init the current_split_start_idx and current_split_end_idx for inputs split
        # 3. Decide the actual input for stream_id 0
        for i in range(multi_stream_module_split_hint.args_len):
            self._do_get_input_for_each_stream(
                hint_object=multi_stream_module_split_hint.args,
                input_object=args,
                stream_input_object=self.args_streams_input[0],
                idx_or_key=i,
                stream_id=0,
            )
        for key in multi_stream_module_split_hint.kwargs:
            self._do_get_input_for_each_stream(
                hint_object=multi_stream_module_split_hint.kwargs,
                input_object=kwargs,
                stream_input_object=self.kwargs_streams_input[0],
                idx_or_key=key,
                stream_id=0,
            )
        # After we get the self.used_num_streams then we can
        # decide the inputs for the left of used_num_streams
        for stream_id in range(1, self.used_num_streams):
            # Update the split idx for current stream
            self.update_split_idx(stream_id)
            # Here we put stream go through as the outer for loop,
            # Since we assume the multi_stream_module_split_hint is not complicated to be recursive generally.
            for i in range(multi_stream_module_split_hint.args_len):
                self._do_get_input_for_each_stream(
                    hint_object=multi_stream_module_split_hint.args,
                    input_object=args,
                    stream_input_object=self.args_streams_input[stream_id],
                    idx_or_key=i,
                    stream_id=stream_id,
                )
            for key in multi_stream_module_split_hint.kwargs:
                self._do_get_input_for_each_stream(
                    hint_object=multi_stream_module_split_hint.kwargs,
                    input_object=kwargs,
                    stream_input_object=self.kwargs_streams_input[stream_id],
                    idx_or_key=key,
                    stream_id=stream_id,
                )
    def _do_generate_outputs(
        self, hint_object, output_object, stream_output_object, idx_or_key, stream_id
    ):
        """Recursively collect one stream's output into ``output_object``.

        At every position the concat hint names (an int), each stream's result
        is appended to a list (created when stream_id == 0); positions hinted
        ``None`` are copied once from stream 0. The lists are concatenated
        later by _do_concat_output_for_each_stream.
        """
        type_arg = type(hint_object[idx_or_key])
        if type_arg in [list]:
            for i in range(hint_object[idx_or_key].__len__()):
                self._do_generate_outputs(
                    hint_object[idx_or_key],
                    output_object[idx_or_key],
                    stream_output_object[idx_or_key],
                    i,
                    stream_id,
                )
        elif type_arg in [tuple]:
            # Tuple doesn't support item change in place
            # So we change it to list for next recursion and change it back to tuple.
            temp = list(output_object[idx_or_key])
            for i in range(hint_object[idx_or_key].__len__()):
                self._do_generate_outputs(
                    hint_object[idx_or_key],
                    temp,
                    stream_output_object[idx_or_key],
                    i,
                    stream_id,
                )
            output_object[idx_or_key] = tuple(temp)
        elif type_arg in [dict]:
            for key in hint_object[idx_or_key]:
                self._do_generate_outputs(
                    hint_object[idx_or_key],
                    output_object[idx_or_key],
                    stream_output_object[idx_or_key],
                    key,
                    stream_id,
                )
        elif (type_arg is int) or (hint_object[idx_or_key] is None):
            if hint_object[idx_or_key] is not None:
                if stream_id == 0:
                    # First stream: replace the hint placeholder with a fresh
                    # accumulator list.
                    output_object[idx_or_key] = []
                output_object[idx_or_key].append(stream_output_object[idx_or_key])
            else:
                # This object shouldn't be concat, just copy once for stream_id = 0
                if stream_id == 0:
                    output_object[idx_or_key] = stream_output_object[idx_or_key]
        else:
            # NOTE(review): this expression builds a tuple containing an
            # AssertionError but never raises it — unsupported hint types are
            # silently ignored. Should be `raise AssertionError(...)`.
            AssertionError(
                False
            ), "Generate outputs failed, unsupport output hint type of:{}".format(
                type_arg
            )
        return None
    def _generate_outputs(self, stream_output_object, stream_id):
        """Fold one stream's raw output into self.output's accumulator lists."""
        # For each position, we will push the result generated by each stream into the list
        # multi_stream_module_split_hint.args_len must be 1, since the module output will be a \
        # single output or tuple for multi outputs
        if self.output_concat_hint.args:
            self._do_generate_outputs(
                hint_object=self.output_concat_hint.args,
                output_object=self.output.args,
                stream_output_object=stream_output_object,
                idx_or_key=0,
                stream_id=stream_id,
            )
        if self.output_concat_hint.kwargs:
            for key, value in self.output_concat_hint.kwargs.items():
                # NOTE(review): the kwargs path indexes stream_output_object[0]
                # — presumably the stream returns a (positional, dict) pair and
                # element 0 carries the keyed outputs; confirm against forward().
                self._do_generate_outputs(
                    hint_object=self.output_concat_hint.kwargs,
                    output_object=self.output.kwargs,
                    stream_output_object=stream_output_object[0],
                    idx_or_key=key,
                    stream_id=stream_id,
                )
def _do_concat_output_for_each_stream(self, hint_object, output_object, idx_or_key):
type_arg = type(hint_object[idx_or_key])
if type_arg in [list]:
for i in range(hint_object[idx_or_key].__len__()):
self._do_concat_output_for_each_stream(
hint_object[idx_or_key], output_object[idx_or_key], i
)
if type_arg in [tuple]:
# Tuple doesn't support item change in place
# So we change it to list for next recursion and change it back to tuple.
temp = list(output_object[idx_or_key])
for i in range(hint_object[idx_or_key].__len__()):
self._do_concat_output_for_each_stream(hint_object[idx_or_key], temp, i)
output_object[idx_or_key] = tuple(temp)
elif type_arg in [dict]:
for key in hint_object[idx_or_key]:
self._do_concat_output_for_each_stream(
hint_object[idx_or_key], output_object[idx_or_key], key
)
elif (type_arg is int) or (hint_object[idx_or_key] is None):
if hint_object[idx_or_key] is not None:
output_object[idx_or_key] = torch.cat(
output_object[idx_or_key], dim=hint_object[idx_or_key]
)
else:
AssertionError(
False
), "Concat output failed, unsupport output hint type of:{}".format(type_arg)
return None
def _concat_output_for_each_stream(self):
# Concat the output, when here each position is already a List of tensors to be concat.
if self.output_concat_hint.args:
self._do_concat_output_for_each_stream(
self.output_concat_hint.args, self.output.args, 0
)
return_obj = dict()
if self.output_concat_hint.kwargs:
for key, value in self.output_concat_hint.kwargs.items():
self._do_concat_output_for_each_stream(
self.output_concat_hint.kwargs, self.output.kwargs, key
)
return_obj[key] = self.output.kwargs[key]
# If the output hint has both the args and kwargs, then we return them as a tuple.
# Otherwise, return them as it is.
if self.output_concat_hint.args and self.output_concat_hint.kwargs:
return self.output.args[0], return_obj
elif self.output_concat_hint.args:
return self.output.args[0]
else:
return return_obj
def forward(self, *args, **kwargs):
# Reset the forward status to default value which mainly contains information
# to split inputs. They will init afterwards for each forward call.
self.reset_forward_status()
if self.num_streams == 1:
# Sync execution path if num_stream is 1
if not core.is_same_core_affinity_setting(self.core_list):
# If the main thread's core affinity has been changed, we should set it again.
core.pin_cpu_cores(self.cpu_pool.cpu_pool)
results_raw = self.model(*args, **kwargs)
return results_raw if self.concat_output else [results_raw]
# Split the raw input to generate input for each stream
self._get_input_for_each_stream(self.input_split_hint, *args, **kwargs)
results_raw_future = []
results_raw = []
for stream_id in range(self.used_num_streams):
results_raw_future.append(
self.tasks[stream_id](
*(self.args_streams_input[stream_id]),
**(self.kwargs_streams_input[stream_id])
)
)
for stream_id in range(self.used_num_streams):
# If we need to concat the output, for each position, we will push the result generated \
# by each stream into a list for concat later.
# For self._generate_outputs: here we put results_raw_future[stream_id].get() into a \
# [results_raw_future[stream_id].get()]
# to align the multi_stream_module_concat_hint structure.
self._generate_outputs(
[results_raw_future[stream_id].get()], stream_id
) if self.concat_output else results_raw.append(
results_raw_future[stream_id].get()
)
# If we need to concat the output, for each position, we will concat the result in the list \
# (generate in self._generate_outputs).
return (
self._concat_output_for_each_stream() if self.concat_output else results_raw
)
    def get_stream_number(self):
        # Return the number of streams this module was configured with.
        return self.num_streams
class _MultiStreamBenchmarkModule(nn.Module):
    # Internal module for weight-sharing benchmarks.
    # Differences from MultiStreamModule:
    #   * The input is not split, so each stream runs with the same input.
    #   * The output is not concatenated, but the synchronization point for
    #     each stream still exists at the end of the forward method.
    def __init__(
        self,
        model,
        num_streams: Union[int, str] = "AUTO",
        cpu_pool: CPUPool = CPUPool(),
    ):
        super(_MultiStreamBenchmarkModule, self).__init__()
        assert (
            type(cpu_pool) is CPUPool
        ), "Input of cpu_pool must be provided with type of ipex.cpu.runtime.CPUPool"
        self.cpu_pool = cpu_pool
        self.core_list = cpu_pool.core_ids
        if isinstance(num_streams, str):
            # For a str input, num_streams must be "auto" (case-insensitive).
            if num_streams.upper() == "AUTO":
                # The default selected value when auto selection is on.
                self.num_streams = get_default_num_streams(cpu_pool)
            else:
                # Bug fix: the original built an AssertionError without
                # raising it, leaving self.num_streams unset for invalid
                # strings and failing later with AttributeError.
                raise AssertionError(
                    'Input of num_streams must be Number of instances or string "AUTO"'
                )
        else:
            assert isinstance(
                num_streams, int
            ), 'Input of num_streams must be Number of instances or string "auto"'
            self.num_streams = num_streams
        if self.num_streams > self.core_list.__len__():
            # Never run more streams than cores.
            self.num_streams = self.core_list.__len__()
            warnings.warn(
                "The number of streams is larger than number of cores. The number of streams changes to {}.".format(
                    self.num_streams
                )
            )
        if self.num_streams == 1:
            # Sync execution path if num_stream is 1.
            self.model = model
        else:
            # Evenly partition the core list into one CPUPool/Task per stream.
            self.cores_per_instance = self.core_list.__len__() // self.num_streams
            num_stream_allocated_extra_core = (
                self.core_list.__len__() % self.num_streams
            )
            self.tasks = []
            start_core_list_idx = 0
            end_core_list_idx = 0
            for j in range(self.num_streams):
                if j < num_stream_allocated_extra_core:
                    # If the core number is not divisible by the stream number,
                    # the first (remainder) streams each get one extra core.
                    end_core_list_idx += self.cores_per_instance + 1
                else:
                    end_core_list_idx += self.cores_per_instance
                self.tasks.append(
                    Task(
                        model,
                        CPUPool(self.core_list[start_core_list_idx:end_core_list_idx]),
                    )
                )
                start_core_list_idx = end_core_list_idx

    def forward(self, *args, **kwargs):
        """Run the same input through every stream, synchronize on all of
        them, and return the first stream's result."""
        if self.num_streams == 1:
            # Sync execution path if num_stream is 1.
            if not core.is_same_core_affinity_setting(self.core_list):
                # Re-pin if the main thread's affinity was changed externally.
                core.pin_cpu_cores(self.cpu_pool.cpu_pool)
            return self.model(*args, **kwargs)
        results_raw_future = []
        results_raw = []
        for j in range(self.num_streams):
            results_raw_future.append(self.tasks[j](*args, **kwargs))
        for j in range(self.num_streams):
            # .get() is the synchronization point for each stream.
            results_raw.append(results_raw_future[j].get())
        return results_raw[0]
| 28,592 | 45.568404 | 116 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/cpu/runtime/cpupool.py | import functools
import warnings
import intel_extension_for_pytorch as ipex
from .runtime_utils import get_core_list_of_node_id
class CPUPool(object):
    r"""
    An abstraction of a pool of CPU cores used for intra-op parallelism.

    Args:
        core_ids (list): A list of CPU cores' ids used for intra-op parallelism.
        node_id (int): A numa node id with all CPU cores on the numa node.
            ``node_id`` doesn't work if ``core_ids`` is set.

    Returns:
        intel_extension_for_pytorch.cpu.runtime.CPUPool: Generated
        intel_extension_for_pytorch.cpu.runtime.CPUPool object.
    """

    def __init__(self, core_ids: list = None, node_id: int = None):
        if not ipex._C._has_cpu():
            return
        if core_ids is None and node_id is None:
            # Default case: use every core available to the current process.
            # Please note:
            #   * The cores will cross numa for multi sockets.
            #   * Logic cores will be used by default.
            #   * The cores available for the current process will change
            #     with an external numactl cmd.
            selected_core_ids = ipex._C.get_process_available_cores()
        elif core_ids is not None:
            if node_id is not None:
                warnings.warn(
                    "Both of core_ids and node_id are inputed. core_ids will be used with priority."
                )
            if type(core_ids) is range:
                core_ids = list(core_ids)
            assert (
                type(core_ids) is list
            ), "Input of core_ids must be the type of list[Int]"
            selected_core_ids = core_ids
        else:
            selected_core_ids = get_core_list_of_node_id(node_id)
        self.core_ids = selected_core_ids
        self.cpu_pool = ipex._C.CPUPool(self.core_ids)
        # ipex._C.CPUPool filters out core ids that are unavailable to the
        # current process, so refresh core_ids from the created pool.
        self.core_ids = self.cpu_pool.get_core_list()
class pin(object):
    r"""
    Apply the given CPU pool to the master thread that runs the scoped code
    region or the function/method def.

    Args:
        cpu_pool (intel_extension_for_pytorch.cpu.runtime.CPUPool):
            intel_extension_for_pytorch.cpu.runtime.CPUPool object, contains
            all CPU cores used by the designated operations.

    Returns:
        intel_extension_for_pytorch.cpu.runtime.pin: Generated
        intel_extension_for_pytorch.cpu.runtime.pin object which can be used
        as a `with` context or a function decorator.
    """

    def __init__(self, cpu_pool: CPUPool):
        self.cpu_pool = cpu_pool
        ipex._C.init_runtime_ext()

    def __enter__(self):
        assert type(self.cpu_pool) is CPUPool
        # Remember the current pool so __exit__ can restore it.
        self.previous_cpu_pool = ipex._C.get_current_cpu_pool()
        ipex._C.pin_cpu_cores(self.cpu_pool.cpu_pool)

    def __exit__(self, *args):
        # Restore the pool that was active before entering the context.
        ipex._C.set_cpu_pool(self.previous_cpu_pool)

    # Support decorator
    def __call__(self, func):
        @functools.wraps(func)
        def pinned_call(*args, **kwargs):
            # Run the wrapped function inside this pin context.
            with self:
                return func(*args, **kwargs)

        return pinned_call
def is_runtime_ext_enabled():
    r"""
    Helper function to check whether runtime extension is enabled or not.

    Args:
        None (None): None

    Returns:
        bool: Whether the runtime extension is enabled or not. If the
            Intel OpenMP Library is preloaded, this API will return True.
            Otherwise, it will return False.
    """
    status = ipex._C.is_runtime_ext_enabled()
    return status == 1
| 3,604 | 34 | 100 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/cpu/runtime/task.py | import torch
import intel_extension_for_pytorch as ipex
from .cpupool import CPUPool
class Task(object):
    r"""
    An abstraction of computation based on PyTorch module and is scheduled
    asynchronously.

    Args:
        model (torch.jit.ScriptModule or torch.nn.Module): The input module.
        cpu_pool (intel_extension_for_pytorch.cpu.runtime.CPUPool): An
            intel_extension_for_pytorch.cpu.runtime.CPUPool object, contains
            all CPU cores used to run Task asynchronously.

    Returns:
        intel_extension_for_pytorch.cpu.runtime.Task: Generated
        intel_extension_for_pytorch.cpu.runtime.Task object.
    """

    def __init__(self, module, cpu_pool: CPUPool):
        self.cpu_pool = cpu_pool
        assert type(self.cpu_pool) is CPUPool
        if isinstance(module, torch.jit.ScriptModule):
            # Scripted modules hand their underlying C++ module to the task.
            self._task = ipex._C.TaskModule(module._c, self.cpu_pool.cpu_pool, True)
        else:
            self._task = ipex._C.TaskModule(module, self.cpu_pool.cpu_pool)

    def __call__(self, *args, **kwargs):
        # Asynchronous execution: returns a future-like handle.
        return self._task.run_async(*args, **kwargs)

    def run_sync(self, *args, **kwargs):
        # Synchronous execution: blocks until the result is ready.
        return self._task.run_sync(*args, **kwargs)
| 1,254 | 32.918919 | 84 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/cpu/tpp/fused_bert.py | import torch
from torch import nn
from .utils.blocked_layout import (
BlockedParameter,
BlockedModule,
BlockedTensor,
get_blocking_signature,
)
import pkg_resources
import warnings
from .optim import AdamW, SGD
import intel_extension_for_pytorch._C as torch_ipex_cpp
try:
from transformers.modeling_utils import apply_chunking_to_forward
from transformers.modeling_outputs import BaseModelOutputWithPastAndCrossAttentions
except ImportError:
pass
# Keep bfloat16 copies of blocked parameters when bf16 layers are enabled.
USE_BF16_PARAMS = True
# Module-level switches: bf16 execution and sequence unpadding for the fused kernels.
layer_use_bf16 = False
unpad = True
# Counter used by print_grad_hook to name the saved gradient dump files.
print_cou = 0
def print_grad_hook(var, name):
    """Debug helper: register a hook on ``var``'s grad_fn that prints the
    incoming gradient's absolute sum and dumps it to ``tmp_u_<i>.pt``.

    No-op when ``var`` has no autograd history.
    """
    # Robustness fix: every tensor has a ``grad_fn`` attribute (often None),
    # so ``hasattr`` alone let None through and register_hook then crashed.
    if getattr(var, "grad_fn", None) is None:
        return

    def register_grad(grad_input, grad_output):
        global print_cou
        print(f"TESTGRADU {name}: {var.grad_fn.name()} - {grad_input[0].abs().sum()}")
        torch.save(grad_input, "tmp_u_%d.pt" % print_cou)
        print_cou += 1

    var.grad_fn.register_hook(register_grad)
def generate_mask(attention_mask):
    """Turn a [B, 1, 1, S] attention mask into a boolean keep-mask, the kept
    mask values, and per-sequence block offsets for the unpadded kernels.

    Assumes kept positions hold 0 and padded ones hold -10000
    (HuggingFace-style extended mask) -- TODO confirm against callers.
    """
    assert attention_mask is not None, "attention_mask is None"
    B, _, _, S = attention_mask.shape
    # Sequence blocking factors; presumably S1 * S2 == S -- verify.
    S1, S2 = BlockedModule.default_blocking_factors(S)
    attention_mask = attention_mask.view([B, S]).clone()
    if unpad:
        # Non-pad token count per sequence, rounded up to a multiple of S2
        # (the +10000 shift makes pad positions zero for count_nonzero).
        nnz = (((attention_mask + 10000).count_nonzero(dim=-1) + (S2 - 1)) // S2) * S2
        # nnz = (((attention_mask+10000).count_nonzero(dim=-1) + (S - 1))//S)*S
        nnz1 = nnz.unsqueeze(dim=1).expand([-1, S])
        a = torch.arange(S).expand([B, -1])
        # Keep only the first nnz positions of each sequence.
        msk = a < nnz1
        attention_mask = attention_mask[msk].clone()
        # Per-sequence number of S2-blocks (cumulated below).
        seq_offsets = torch.cat([torch.zeros([1]), nnz // S2]).to(torch.long)
    else:
        # Padded path: keep everything; each sequence spans S // S2 blocks.
        msk = torch.ones_like(attention_mask).to(torch.bool)
        seq_offsets = torch.cat([torch.zeros([1]), torch.ones([B]) * S // S2]).to(
            torch.long
        )
    # Squared block counts; presumably used to index each sequence's S x S
    # attention-score buffer -- confirm against the fused kernel.
    seq_sqr_offsets = seq_offsets * seq_offsets
    seq_offsets = seq_offsets.cumsum(dim=0)
    seq_sqr_offsets = seq_sqr_offsets.cumsum(dim=0)
    return msk, attention_mask, seq_offsets, seq_sqr_offsets
class PadInput(torch.autograd.Function):
    """Scatter unpadded rows back into a zero-padded tensor; the backward
    pass gathers the gradient rows selected by the mask."""

    @staticmethod
    def forward(ctx, input, msk, padded_shape):
        ctx.save_for_backward(msk)
        padded = input.new_zeros(padded_shape)
        padded[msk, :] = input
        return padded

    @staticmethod
    def backward(ctx, grad_output):
        (msk,) = ctx.saved_tensors
        # Only the rows that came from the input carry gradient.
        return grad_output[msk, :], None, None
class UnpadInput(torch.autograd.Function):
    """Gather the rows selected by the mask; the backward pass scatters the
    gradient back into zeros of the original shape."""

    @staticmethod
    def forward(ctx, input, msk):
        ctx.save_for_backward(msk)
        ctx.shape = input.shape
        return input[msk, :]

    @staticmethod
    def backward(ctx, grad_output):
        (msk,) = ctx.saved_tensors
        scattered = grad_output.new_zeros(ctx.shape)
        scattered[msk, :] = grad_output
        return scattered, None
# class DummyLinear(BlockedModule):
# def __init__(self, in_features, out_features, bias=True):
# super(DummyLinear, self).__init__()
# self.weight = BlockedParameter(torch.Tensor(out_features, in_features))
# if bias:
# self.bias = BlockedParameter(torch.Tensor(out_features))
# else:
# self.register_parameter("bias", None)
# self.reset_parameters()
#
# def reset_parameters(self):
# init.kaiming_uniform_(self.weight, a=math.sqrt(5))
# if self.bias is not None:
# fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
# bound = 1 / math.sqrt(fan_in)
# init.uniform_(self.bias, -bound, bound)
#
# def forward(self, input):
# raise NotImplementedError
# return input
class DummyLinear(BlockedModule, torch.nn.Linear):
    """A Linear whose parameters are BlockedParameters.

    It only carries weights for the fused kernels; its forward() is never
    meant to be called directly.
    """

    def __init__(self, in_features, out_features, bias=True):
        torch.nn.Linear.__init__(self, in_features, out_features, bias)
        # Wrap parameters so they can be re-laid-out into blocked format.
        self.weight = BlockedParameter(self.weight.data)
        if bias:
            self.bias = BlockedParameter(self.bias.data)

    def forward(self, input):
        # Calling this module is a programming error; the fused ops consume
        # the parameters directly. (Removed unreachable `return input`.)
        raise NotImplementedError
class DummyLayerNorm(BlockedModule, torch.nn.LayerNorm):
    """A LayerNorm whose affine parameters are BlockedParameters.

    It only carries weights for the fused kernels; its forward() is never
    meant to be called directly.
    """

    def __init__(self, *args, **kwargs):
        torch.nn.LayerNorm.__init__(self, *args, **kwargs)
        if self.elementwise_affine:
            # Wrap parameters so they can be re-laid-out into blocked format.
            self.weight = BlockedParameter(self.weight.data)
            self.bias = BlockedParameter(self.bias.data)

    def forward(self, input):
        # Calling this module is a programming error; the fused ops consume
        # the parameters directly. (Removed unreachable `return input`.)
        raise NotImplementedError
class BertSelfAttentionFunction(torch.autograd.Function):
    """Autograd wrapper around the fused (unpadded) self-attention kernels.

    forward() runs the torch_ipex fused kernel and stashes every
    intermediate the fused backward kernel consumes; backward() replays
    them and maps the kernel outputs back onto the forward arguments.
    """

    @staticmethod
    def forward(ctx, p, training, need_attention_output, *inputs):
        # p: attention-dropout probability (0.0 in eval mode).
        # inputs layout (built in BertSelfAttention.forward):
        #   (qw, qb, kw, kb, vw, vb, hs, am, hm, ehs, eam, offs, offs2)
        # print("FWD Called")
        # print("BSAFWD:", [t.shape if isinstance(t, torch.Tensor) else t for t in inputs[6:]])
        (
            context_layer,
            attention_probs_out,
            hs_t,
            ehs_t,
            ql_t,
            kl_tv,
            vl_tv,
            ap,
            apd_t,
            ap_dp_mask,
        ) = torch.ops.torch_ipex.fused_self_attention_fwd_unpad(p, inputs, training)
        (qw, qb, kw, kb, vw, vb, hs, am, hm, ehs, eam, offs, offs2) = inputs
        # Save the weights plus the kernel's transposed/intermediate tensors
        # (hs_t, ql_t, ...) and the dropout mask for the fused backward.
        ctx.save_for_backward(
            qw,
            kw,
            vw,
            hs_t,
            hm,
            ehs_t,
            ql_t,
            kl_tv,
            vl_tv,
            ap,
            apd_t,
            ap_dp_mask,
            offs,
            offs2,
        )
        ctx.p = p
        # stop = False
        # for i, t in enumerate([context_layer, attention_probs_out, hs_t, ehs_t, ql_t, kl_tv, vl_tv, ap, apd_t, ap_dp_mask]):
        #     nan = t.isnan().any().item()
        #     stop = stop or nan
        #     if nan: print ("Nan found in %d tensor" % i)
        # if stop: raise "Nan Found"
        # print("Returning from FWD")
        if need_attention_output:
            return context_layer, attention_probs_out
        else:
            return (context_layer,)

    @staticmethod
    def backward(ctx, *grad_outs):
        # print("BWD Called")
        inputs = []
        inputs += [g.contiguous() for g in grad_outs]
        if len(inputs) == 1:
            # No attention-probs gradient was produced; pad with an empty
            # tensor so the fused kernel always sees two gradient slots.
            inputs.append(inputs[0].new_empty(0))
        inputs += ctx.saved_tensors
        p = ctx.p
        (
            dqw,
            dqb,
            dkw,
            dkb,
            dvw,
            dvb,
            dhs,
            dehs,
        ) = torch.ops.torch_ipex.fused_self_attention_bwd_unpad(p, inputs)
        # inputs[7] is the saved ehs_t (transposed encoder hidden states).
        # NOTE(review): saved tensors are unlikely to be None here, so this
        # guard may never fire -- confirm against the kernel's outputs.
        ehs = inputs[7]
        if ehs is None:
            dehs = None
        # print("Returning from BWD")
        # print("DHS:", dhs.view([-1])[:4])
        # Leading Nones cover (p, training, need_attention_output); trailing
        # Nones cover non-differentiable inputs (masks, offsets).
        return (
            None,
            None,
            None,
            dqw,
            dqb,
            dkw,
            dkb,
            dvw,
            dvb,
            dhs,
            None,
            None,
            dehs,
            None,
            None,
            None,
        )
class BertSelfAttention(BlockedModule):
    r"""PCL Bert Self Attention Layer using libxsmm blocked GEMM"""

    # __constants__ = ['bias', 'C', 'K']
    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(
            config, "embedding_size"
        ):
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads)
            )
        # self.output_attentions = config.output_attentions
        self.num_attention_heads = config.num_attention_heads  # N
        self.attention_head_size = int(
            config.hidden_size / config.num_attention_heads
        )  # H
        self.all_head_size = self.num_attention_heads * self.attention_head_size  # NH
        self.hidden_size = config.hidden_size  # HS
        self.attention_probs_dropout_prob = config.attention_probs_dropout_prob
        # Dummy layers only carry (blocked) parameters; the math runs in the
        # fused kernel, so their forward() is never called.
        self.query = DummyLinear(config.hidden_size, self.all_head_size)
        self.key = DummyLinear(config.hidden_size, self.all_head_size)
        self.value = DummyLinear(config.hidden_size, self.all_head_size)
        self.is_decoder = config.is_decoder
        self.position_embedding_type = getattr(
            config, "position_embedding_type", "absolute"
        )
        assert (
            self.position_embedding_type == "absolute"
        ), "self.position_embedding_type other than absolute not supported"
        # Block the Q/K/V weights into head-sized tiles for the fused GEMM.
        self.query.weight.set_blocking_param(
            (
                [self.attention_head_size, self.attention_head_size],
                [0, 2, 3, 1],
            )
        )
        self.key.weight.set_blocking_param(
            (
                [self.attention_head_size, self.attention_head_size],
                [0, 2, 3, 1],
            )
        )
        self.value.weight.set_blocking_param(
            (
                [self.attention_head_size, self.attention_head_size],
                [0, 2, 3, 1],
            )
        )
        self.blocked_input_signature = get_blocking_signature("SF", "SFSF")
        if layer_use_bf16 is True and USE_BF16_PARAMS:
            # bf16 path: repack weight rows in pairs -- presumably for the
            # bf16 dot-product kernels; confirm against the kernel layout.
            self.query.weight.set_blocking_param(
                (
                    [self.attention_head_size, [self.attention_head_size // 2, 2]],
                    [0, 2, 3, 1, 4],
                    torch.bfloat16,
                )
            )
            self.key.weight.set_blocking_param(
                (
                    [self.attention_head_size, [self.attention_head_size // 2, 2]],
                    [0, 2, 3, 1, 4],
                    torch.bfloat16,
                )
            )
            self.value.weight.set_blocking_param(
                (
                    [self.attention_head_size, [self.attention_head_size // 2, 2]],
                    [0, 2, 3, 1, 4],
                    torch.bfloat16,
                )
            )
            self.query.bias.set_blocking_param((None, None, torch.bfloat16))
            self.key.bias.set_blocking_param((None, None, torch.bfloat16))
            self.value.bias.set_blocking_param((None, None, torch.bfloat16))
        self.use_bf16 = layer_use_bf16
        # self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def maybe_block_params(self):
        # Lazily convert parameters to their blocked layout (no-op if done).
        self.query.weight.block()
        self.key.weight.block()
        self.value.weight.block()
        self.query.bias.block()
        self.key.bias.block()
        self.value.bias.block()

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
        seq_offsets=None,
        seq_sqr_offsets=None,
    ):
        assert past_key_value is None, "past_key_value not supported"
        self.maybe_block_params()
        if encoder_hidden_states is not None:
            assert (
                encoder_hidden_states.shape == hidden_states.shape
            ), "Different shapes not supported(%s != %s)" % (
                encoder_hidden_states.shape,
                hidden_states.shape,
            )
            encoder_hidden_states = self.get_blocked_tensor(
                encoder_hidden_states,
                self.blocked_input_signature,
                [None, self.attention_head_size],
            )
        orig_hidden_states = hidden_states
        hidden_states = self.get_blocked_tensor(
            hidden_states,
            self.blocked_input_signature,
            [None, self.attention_head_size],
        )
        # print(f"hidden_states: {hidden_states.shape}")
        # Inputs are passed positionally to the fused kernel:
        # (qw, qb, kw, kb, vw, vb, hs, am, hm, ehs, eam, offs, offs2).
        inputs = [
            self.query.weight,
            self.query.bias,
            self.key.weight,
            self.key.bias,
            self.value.weight,
            self.value.bias,
        ]
        inputs.append(hidden_states)
        if attention_mask is not None:
            # print(f"attention_mask: {attention_mask.shape}")
            # B, S1, N, S2, H = hidden_states.shape
            # S = S1 * S2
            # print("Before attention_mask shape = %s (%s)" % (attention_mask.shape, attention_mask.numel()))
            # attention_mask = attention_mask.expand([B, N, S, S]).view(
            #     [B, N, S1, S2, S1, S2]).permute([0, 2, 1, 4, 3, 5]).contiguous()
            # assert (
            #     attention_mask.size(1) == attention_mask.size(2) == 1
            # ), "unsupported attention_mask shape %s" % (attention_mask.shape,)
            attention_mask = attention_mask.contiguous()
            # print("After attention_mask shape = %s (%s)" % (attention_mask.shape, attention_mask.numel()))
        if head_mask is not None:
            print(f"head_mask: {head_mask.shape}")
        if encoder_attention_mask is not None:
            print(f"encoder_attention_mask: {encoder_attention_mask.shape}")
            # B, S1, N, S2, H = encoder_hidden_states.shape
            # S = S1 * S2
            # encoder_attention_mask = encoder_attention_mask.expand([B, N, S, S]).view(
            #     [B, N, S1, S2, S1, S2]).permute([0, 2, 1, 4, 3, 5]).contiguous()
            assert (
                encoder_attention_mask.size(1) == encoder_attention_mask.size(2) == 1
            ), "unsupported encoder_attention_mask shape %s" % (
                encoder_attention_mask.shape,
            )
            encoder_attention_mask = encoder_attention_mask.contiguous()
        # Empty tensors stand in for absent optional inputs so the kernel
        # always receives a fixed-length argument list.
        inputs.append(attention_mask if attention_mask is not None else torch.Tensor())
        inputs.append(head_mask if head_mask is not None else torch.Tensor())
        inputs.append(
            encoder_hidden_states
            if encoder_hidden_states is not None
            else torch.Tensor()
        )
        inputs.append(
            encoder_attention_mask
            if encoder_attention_mask is not None
            else torch.Tensor()
        )
        inputs.append(seq_offsets if seq_offsets is not None else torch.Tensor())
        inputs.append(
            seq_sqr_offsets if seq_sqr_offsets is not None else torch.Tensor()
        )
        # context_layer, attention_probs = torch.ops.torch_ipex.forward(self.handle.handle, inputs)
        # Dropout is disabled outside training.
        p = self.attention_probs_dropout_prob if self.training else 0.0
        if self.use_bf16:
            inputs = [
                i.to(torch.bfloat16) if i.is_floating_point() else i for i in inputs
            ]
        outputs = BertSelfAttentionFunction.apply(
            p, self.training, output_attentions, *inputs
        )
        # outputs = BertSelfAttentionFunction.apply(p, self.training, True, *inputs)
        context_layer = outputs[0]
        # Re-wrap in the blocked signature, restoring the caller's dtype.
        context_layer = BlockedTensor(
            context_layer, self.blocked_input_signature, orig_hidden_states.dtype
        )
        if output_attentions:
            # NOTE(review): B and S are undefined in this scope (the code that
            # derived them is commented out above), so this branch raises
            # NameError when output_attentions is True -- needs fixing.
            print("Reshaping output_attentions")
            attention_probs = outputs[1]
            attention_probs = (
                attention_probs.permute([0, 2, 1, 4, 3, 5])
                .contiguous()
                .view([B, self.num_attention_heads, S, S])
                .to(orig_hidden_states.dtype)
            )
        outputs = (
            (context_layer, attention_probs) if output_attentions else (context_layer,)
        )
        return outputs
class BertOutputBaseFunction(torch.autograd.Function):
    """Autograd wrapper for the fused dense + dropout + residual-add +
    layernorm kernel."""

    @staticmethod
    def forward(ctx, p, eps, training, *inputs):
        (inp, inp2, wt, bias, gamma, beta) = inputs
        out, dout, mean, var, dp_mask = (
            torch.ops.torch_ipex.fused_dense_dropout_layernorm_fwd_unpad(
                p, eps, inputs, training
            )
        )
        # Stash what the fused backward kernel consumes.
        ctx.save_for_backward(inp, wt, gamma, mean, var, dout, dp_mask)
        ctx.p = p
        return out

    @staticmethod
    def backward(ctx, *grad_outs):
        bwd_inputs = list(grad_outs) + list(ctx.saved_tensors)
        (
            grad_inp,
            grad_inp2,
            grad_wt,
            grad_bias,
            grad_gamma,
            grad_beta,
        ) = torch.ops.torch_ipex.fused_dense_dropout_layernorm_bwd_unpad(
            ctx.p, bwd_inputs
        )
        # Leading Nones cover the non-tensor args (p, eps, training).
        return (
            None,
            None,
            None,
            grad_inp,
            grad_inp2,
            grad_wt,
            grad_bias,
            grad_gamma,
            grad_beta,
        )
class BertOutputBase(BlockedModule):
    """Shared base for BertSelfOutput / BertOutput: fused dense projection +
    dropout + residual add + LayerNorm over blocked tensors."""

    def __init__(self, config, selfOutput):
        super().__init__()
        # selfOutput=True -> project from hidden_size (attention output);
        # selfOutput=False -> project from intermediate_size (FFN output).
        ifm = config.hidden_size if selfOutput else config.intermediate_size
        self.dense = DummyLinear(ifm, config.hidden_size)
        # self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.LayerNorm = DummyLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.hidden_dropout_prob = config.hidden_dropout_prob
        self.layer_norm_eps = config.layer_norm_eps
        self.attention_head_size = config.hidden_size // config.num_attention_heads
        # self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Block the dense weight into head-sized tiles for the fused GEMM.
        self.dense.weight.set_blocking_param(
            (
                [self.attention_head_size, self.attention_head_size],
                [0, 2, 3, 1],
            )
        )
        self.blocked_input_signature = get_blocking_signature("SF", "SFSF")
        if layer_use_bf16 is True and USE_BF16_PARAMS:
            # bf16 path: repack weight rows in pairs -- presumably for the
            # bf16 dot-product kernels; confirm against the kernel layout.
            self.dense.weight.set_blocking_param(
                (
                    [self.attention_head_size, [self.attention_head_size // 2, 2]],
                    [0, 2, 3, 1, 4],
                    torch.bfloat16,
                )
            )
            self.dense.bias.set_blocking_param((None, None, torch.bfloat16))
            self.LayerNorm.weight.set_blocking_param((None, None, torch.bfloat16))
            self.LayerNorm.bias.set_blocking_param((None, None, torch.bfloat16))
        self.use_bf16 = layer_use_bf16
        # print(f"config.hidden_size = {config.hidden_size}, ifm = {ifm},
        # p = {config.hidden_dropout_prob}, eps = {config.layer_norm_eps}")

    def maybe_block_params(self):
        # Lazily convert parameters to their blocked layout (no-op if done).
        self.dense.weight.block()
        self.dense.bias.block()
        self.LayerNorm.weight.block()
        self.LayerNorm.bias.block()

    def forward(self, hidden_states, input_tensor):
        # input_tensor is the residual branch added before LayerNorm.
        self.maybe_block_params()
        orig_hidden_states = hidden_states
        hidden_states = self.get_blocked_tensor(
            hidden_states,
            self.blocked_input_signature,
            [None, self.attention_head_size],
        )
        input_tensor = self.get_blocked_tensor(
            input_tensor,
            self.blocked_input_signature,
            [None, self.attention_head_size],
        )
        inputs = [
            hidden_states,
            input_tensor,
            self.dense.weight,
            self.dense.bias,
            self.LayerNorm.weight,
            self.LayerNorm.bias,
        ]
        # Dropout is disabled outside training.
        p = self.hidden_dropout_prob if self.training else 0.0
        if self.use_bf16:
            inputs = [
                i.to(torch.bfloat16) if i.is_floating_point() else i for i in inputs
            ]
        ret = BertOutputBaseFunction.apply(
            p, self.layer_norm_eps, self.training, *inputs
        )
        # ret = ret.to(hidden_states.dtype)
        # Re-wrap in the blocked signature, restoring the caller's dtype.
        ret = BlockedTensor(ret, self.blocked_input_signature, orig_hidden_states.dtype)
        return ret
        # hidden_states = self.dense(hidden_states)
        # hidden_states = self.dropout(hidden_states)
        # hidden_states = self.LayerNorm(hidden_states + input_tensor)
        # return hidden_states
class BertSelfOutput(BertOutputBase):
    """Fused dense + dropout + residual + LayerNorm applied after
    self-attention (input width = hidden_size)."""

    def __init__(self, config):
        super().__init__(config, True)
class BertOutput(BertOutputBase):
    """Fused dense + dropout + residual + LayerNorm applied after the
    intermediate layer (input width = intermediate_size)."""

    def __init__(self, config):
        super().__init__(config, False)
class BertIntermediateFunction(torch.autograd.Function):
    """Autograd wrapper for the fused dense + GELU kernel."""

    @staticmethod
    def forward(ctx, input, weight, bias, act, training):
        # gelu_in is the pre-activation, kept for the backward pass.
        gelu_in, output = torch.ops.torch_ipex.fused_dense_gelu_fwd_unpad(
            input, weight, bias, training
        )
        ctx.save_for_backward(input, weight, gelu_in)
        ctx.act = act
        return output

    @staticmethod
    def backward(ctx, grad_out):
        input, weight, gelu_in = ctx.saved_tensors
        grad_inp, grad_wt, grad_bias = torch.ops.torch_ipex.fused_dense_gelu_bwd_unpad(
            grad_out.contiguous(), gelu_in, input, weight
        )
        # No gradients for the act / training flags.
        return grad_inp, grad_wt, grad_bias, None, None
class BertIntermediate(BlockedModule):
    """Fused dense + GELU intermediate (FFN up-projection) layer."""

    def __init__(self, config):
        super().__init__()
        self.dense = DummyLinear(config.hidden_size, config.intermediate_size)
        self.attention_head_size = config.hidden_size // config.num_attention_heads
        # Block the dense weight into head-sized tiles for the fused GEMM.
        self.dense.weight.set_blocking_param(
            (
                [self.attention_head_size, self.attention_head_size],
                [0, 2, 3, 1],
            )
        )
        assert config.hidden_act in ["gelu", "gelu_new"], (
            "Currently, only GELU new is supported in fused op, %s is given"
            % config.hidden_act
        )
        self.hidden_act = config.hidden_act
        self.blocked_input_signature = get_blocking_signature("SF", "SFSF")
        if layer_use_bf16 is True and USE_BF16_PARAMS:
            # bf16 path: repack weight rows in pairs -- presumably for the
            # bf16 dot-product kernels; confirm against the kernel layout.
            self.dense.weight.set_blocking_param(
                (
                    [self.attention_head_size, [self.attention_head_size // 2, 2]],
                    [0, 2, 3, 1, 4],
                    torch.bfloat16,
                )
            )
            self.dense.bias.set_blocking_param((None, None, torch.bfloat16))
        self.use_bf16 = True if layer_use_bf16 else False
        # if isinstance(config.hidden_act, str):
        #     self.intermediate_act_fn = ACT2FN[config.hidden_act]
        # else:
        #     self.intermediate_act_fn = config.hidden_act

    def maybe_block_params(self):
        # Lazily convert parameters to their blocked layout (no-op if done).
        self.dense.weight.block()
        self.dense.bias.block()

    def forward(self, hidden_states):
        self.maybe_block_params()
        orig_hidden_states = hidden_states
        hidden_states = self.get_blocked_tensor(
            hidden_states,
            self.blocked_input_signature,
            [None, self.attention_head_size],
        )
        inputs = [hidden_states, self.dense.weight, self.dense.bias]
        if self.use_bf16:
            inputs = [
                i.to(torch.bfloat16) if i.is_floating_point() else i for i in inputs
            ]
        ret = BertIntermediateFunction.apply(*inputs, self.hidden_act, self.training)
        # ret = ret.to(hidden_states.dtype)
        # Re-wrap in the blocked signature, restoring the caller's dtype.
        hidden_states = BlockedTensor(
            ret, self.blocked_input_signature, orig_hidden_states.dtype
        )
        # hidden_states = self.dense(hidden_states)
        # hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states
class BertEmbeddingsFunction(torch.autograd.Function):
    """Autograd wrapper for the fused embedding-lookup + LayerNorm + dropout
    kernel."""

    @staticmethod
    def forward(ctx, training, prob, eps, head_size, pad_id, *inputs):
        # inputs: (input_ids, position_ids, token_type_ids, inputs_embeds,
        #          ln_gamma, ln_beta, word_emb_w, pos_emb_w, tok_type_emb_w)
        (ii, pi, ti, ie, g, b, we, pe, te) = inputs
        out, eout, mean, var, msk = (
            torch.ops.torch_ipex.fused_embedding_layernorm_dropout_fwd_unpad(
                prob, eps, head_size, pad_id, inputs, training
            )
        )
        ctx.save_for_backward(ii, pi, ti, ie, g, we, pe, te, mean, var, eout, msk)
        ctx.prob = prob
        ctx.pad_id = pad_id
        return out

    @staticmethod
    def backward(ctx, *grad_outs):
        bwd_inputs = [t.contiguous() for t in grad_outs]
        bwd_inputs += ctx.saved_tensors
        die, dg, db, dwe, dpe, dte = (
            torch.ops.torch_ipex.fused_embedding_layernorm_dropout_bwd_unpad(
                ctx.prob, ctx.pad_id, bwd_inputs
            )
        )
        # 5 Nones for (training, prob, eps, head_size, pad_id), then 3 Nones
        # for the integer id inputs (ii, pi, ti) before the tensor gradients.
        return (None,) * 5 + (None, None, None, die, dg, db, dwe, dpe, dte)
class BertEmbeddings(BlockedModule):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(
            config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id
        )
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size
        )
        self.token_type_embeddings = nn.Embedding(
            config.type_vocab_size, config.hidden_size
        )
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = DummyLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.layer_norm_eps = config.layer_norm_eps
        # self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.hidden_dropout_prob = config.hidden_dropout_prob
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.pad_token_id = config.pad_token_id
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))
        )
        self.position_embedding_type = getattr(
            config, "position_embedding_type", "absolute"
        )
        assert (
            self.position_embedding_type == "absolute"
        ), f"position embedding type {self.position_embedding_type} not supported"
        self.blocked_ids_signature = get_blocking_signature("BS", "BSS")
        self.blocked_embed_signature = get_blocking_signature("BSF", "BSFSF")
        self.use_bf16 = layer_use_bf16
        # Log the configuration once (rank 0 only under distributed runs).
        if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
            print(
                f"config.hidden_size = {config.hidden_size}, config.intermediate_size = {config.intermediate_size},\
                p = {config.hidden_dropout_prob}, eps = {config.layer_norm_eps}, bf16 = {layer_use_bf16}"
            )

    def forward(
        self,
        input_ids=None,
        token_type_ids=None,
        position_ids=None,
        inputs_embeds=None,
        past_key_values_length=0,
    ):
        assert past_key_values_length == 0, "past_key_values_length != 0 Not supported"
        # NOTE(review): input_shape is assigned in both branches but never
        # used afterwards.
        if input_ids is not None:
            input_shape = input_ids.size()
            input_ids = self.get_blocked_tensor(
                input_ids, self.blocked_ids_signature, [None, None]
            )
        else:
            input_shape = inputs_embeds.size()[:-1]
            input_ids = torch.LongTensor()
            inputs_embeds = self.get_blocked_tensor(
                inputs_embeds,
                self.blocked_embed_signature,
                [None, self.attention_head_size],
            )
        # seq_length = input_shape[1]
        # Empty tensors stand in for absent optional inputs so the fused
        # kernel always receives a fixed-length argument list.
        if position_ids is None:
            position_ids = torch.LongTensor()
        else:
            position_ids = self.get_blocked_tensor(
                position_ids, self.blocked_ids_signature, [None, None]
            )
        if token_type_ids is None:
            token_type_ids = torch.LongTensor()
        else:
            token_type_ids = self.get_blocked_tensor(
                token_type_ids, self.blocked_ids_signature, [None, None]
            )
        if inputs_embeds is None:
            inputs_embeds = torch.Tensor()
        # inputs_embeds = self.word_embeddings(input_ids)
        # position_embeddings = self.position_embeddings(position_ids)
        # token_type_embeddings = self.token_type_embeddings(token_type_ids)
        emb_weighs = [
            self.word_embeddings.weight,
            self.position_embeddings.weight,
            self.token_type_embeddings.weight,
        ]
        inputs = [
            input_ids,
            position_ids,
            token_type_ids,
            inputs_embeds,
            self.LayerNorm.weight,
            self.LayerNorm.bias,
        ]
        # Dropout is disabled outside training.
        p = self.hidden_dropout_prob if self.training else 0.0
        if self.use_bf16:
            inputs = [
                i.to(torch.bfloat16) if i.is_floating_point() else i for i in inputs
            ]
        # Embedding weights are appended AFTER the bf16 cast, so they stay
        # in their original (fp32) dtype.
        inputs += emb_weighs
        embeddings = BertEmbeddingsFunction.apply(
            self.training,
            p,
            self.layer_norm_eps,
            self.attention_head_size,
            self.pad_token_id,
            *inputs,
        )
        # embeddings = BlockedTensor(embeddings, self.blocked_embed_signature, torch.bfloat16 if self.use_bf16 else torch.float)
        embeddings = BlockedTensor(
            embeddings, self.blocked_embed_signature, torch.float
        )
        # embeddings = inputs_embeds + position_embeddings + token_type_embeddings
        # embeddings = self.LayerNorm(embeddings)
        # embeddings = self.dropout(embeddings)
        return embeddings
class BertAttention(nn.Module):
    """Attention block: a BertSelfAttention core followed by the
    BertSelfOutput projection/residual, with support for head pruning."""

    def __init__(self, config):
        super().__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Remove the given attention heads and shrink the linear layers
        accordingly (no-op for an empty head set)."""
        if not heads:
            return
        prunable, idx = find_pruneable_heads_and_indices(
            heads,
            self.self.num_attention_heads,
            self.self.attention_head_size,
            self.pruned_heads,
        )
        # Shrink the q/k/v projections along the output dim and the output
        # projection along its input dim.
        self.self.query = prune_linear_layer(self.self.query, idx)
        self.self.key = prune_linear_layer(self.self.key, idx)
        self.self.value = prune_linear_layer(self.self.value, idx)
        self.output.dense = prune_linear_layer(self.output.dense, idx, dim=1)
        # Keep the hyper-parameters and pruning bookkeeping in sync.
        self.self.num_attention_heads -= len(prunable)
        self.self.all_head_size = (
            self.self.attention_head_size * self.self.num_attention_heads
        )
        self.pruned_heads = self.pruned_heads.union(prunable)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
        seq_offsets=None,
        seq_sqr_offsets=None,
    ):
        """Run the attention core, then the output projection; returns the
        attention output followed by any extra tensors from the core."""
        core_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
            seq_offsets=seq_offsets,
            seq_sqr_offsets=seq_sqr_offsets,
        )
        attn_out = self.output(core_outputs[0], hidden_states)
        # Re-attach attention weights / caches produced by the core.
        return (attn_out,) + core_outputs[1:]
class BertLayer(nn.Module):
    """One transformer layer: self-attention, optional cross-attention
    (decoder only), then an optionally chunked feed-forward block."""

    def __init__(self, config):
        super().__init__()
        # Feed-forward may be applied in sequence-dim chunks to bound memory.
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = BertAttention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            assert (
                self.is_decoder
            ), f"{self} should be used as a decoder model if cross attention is added"
            self.crossattention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
        seq_offsets=None,
        seq_sqr_offsets=None,
    ):
        # seq_offsets / seq_sqr_offsets index into the unpadded TPP layout
        # produced by BertEncoder and are threaded through to the attention
        # cores unchanged.
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = (
            past_key_value[:2] if past_key_value is not None else None
        )
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
            seq_offsets=seq_offsets,
            seq_sqr_offsets=seq_sqr_offsets,
        )
        attention_output = self_attention_outputs[0]

        # if decoder, the last output is tuple of self-attn cache
        if self.is_decoder:
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[
                1:
            ]  # add self attentions if we output attention weights

        cross_attn_present_key_value = None
        if self.is_decoder and encoder_hidden_states is not None:
            assert hasattr(
                self, "crossattention"
            ), f"If `encoder_hidden_states` are passed, {self} has to be instantiated\
                with cross-attention layers by setting `config.add_cross_attention=True`"
            # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
            cross_attn_past_key_value = (
                past_key_value[-2:] if past_key_value is not None else None
            )
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
                seq_offsets=seq_offsets,
                seq_sqr_offsets=seq_sqr_offsets,
            )
            attention_output = cross_attention_outputs[0]
            outputs = (
                outputs + cross_attention_outputs[1:-1]
            )  # add cross attentions if we output attention weights

            # add cross-attn cache to positions 3,4 of present_key_value tuple
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value

        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk,
            self.chunk_size_feed_forward,
            self.seq_len_dim,
            attention_output,
        )
        outputs = (layer_output,) + outputs

        # if decoder, return the attn key/values as the last output
        if self.is_decoder:
            outputs = outputs + (present_key_value,)

        return outputs

    def feed_forward_chunk(self, attention_output):
        """Intermediate + output projection for one chunk of the sequence."""
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
class BertEncoder(nn.Module):
    """Stack of BertLayer modules operating on TPP-blocked, unpadded tensors.

    Incoming hidden states are unpadded once (``UnpadInput``) using a mask
    derived from ``attention_mask`` and re-padded (``PadInput``) after the
    last layer, so every layer works on the dense, padding-free layout.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList(
            [BertLayer(config) for _ in range(config.num_hidden_layers)]
        )

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = (
            () if output_attentions and self.config.add_cross_attention else None
        )
        next_decoder_cache = () if use_cache else None
        if hasattr(hidden_states, "unblocked_tensor"):
            hidden_states = hidden_states.unblocked_tensor()
        padded_shape = hidden_states.shape
        # Build the padding mask once; seq_offsets / seq_sqr_offsets index
        # into the unpadded layout and are shared by every layer.
        msk, attention_mask, seq_offsets, seq_sqr_offsets = generate_mask(
            attention_mask
        )
        hidden_states = UnpadInput.apply(hidden_states, msk)
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None

            if getattr(self.config, "gradient_checkpointing", False) and self.training:
                if use_cache:
                    logger.warning(
                        "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                        "`use_cache=False`..."
                    )
                    use_cache = False

                def create_custom_forward(module):
                    # BUGFIX: seq_offsets / seq_sqr_offsets used to be passed
                    # as keyword arguments to torch.utils.checkpoint.checkpoint,
                    # which does not forward arbitrary kwargs to the wrapped
                    # function and raises on unexpected keyword arguments.
                    # Capture them in the closure instead.
                    def custom_forward(*inputs):
                        return module(
                            *inputs,
                            past_key_value,
                            output_attentions,
                            seq_offsets=seq_offsets,
                            seq_sqr_offsets=seq_sqr_offsets,
                        )

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                    seq_offsets=seq_offsets,
                    seq_sqr_offsets=seq_sqr_offsets,
                )

            hidden_states = layer_outputs[0]
            if use_cache:
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if hasattr(hidden_states, "unblocked_tensor"):
            hidden_states = hidden_states.unblocked_tensor()
        # Restore the original padded layout for downstream consumers.
        hidden_states = PadInput.apply(hidden_states, msk, padded_shape)
        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )
class BertPooler(nn.Module):
    """Maps the hidden state of the first token through a dense layer with
    tanh to produce the pooled sequence representation."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
class BertPredictionHeadTransform(nn.Module):
    """Dense -> activation -> LayerNorm transform applied before the LM
    decoder head."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # Accept either an activation name (looked up in ACT2FN) or a callable.
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = DummyLayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        return self.LayerNorm(self.transform_act_fn(self.dense(hidden_states)))
class BertLMPredictionHead(nn.Module):
    """Transform followed by a vocab-sized linear decoder whose weights may
    be tied to the input embeddings, with a standalone output bias."""

    def __init__(self, config):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)
        # The output weights are the same as the input embeddings, but there
        # is an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        # Link the bias to the decoder so `resize_token_embeddings` resizes
        # both together.
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        return self.decoder(self.transform(hidden_states))
# bm_default_blocking_factors = BlockedModule.default_blocking_factors
# @staticmethod
# def custom_blocking_factors(S):
# print(f"S = {S}")
# if S % 32 == 0: return [S//32, 32]
# return bm_default_blocking_factors
# BlockedModule.default_blocking_factors = custom_blocking_factors
# Monkey-patch transformers' is_tensor so BlockedTensor instances are
# accepted wherever transformers' utility code checks for tensors.
try:
    import transformers

    # Keep a handle on the stock implementation so we can defer to it.
    transformers_orig_is_tensor = transformers.file_utils.is_tensor

    def is_tensor(x):
        """Tests if ``x`` is a :obj:`torch.Tensor`, :obj:`tf.Tensor` or :obj:`np.ndarray`."""
        # Defer to the original check first, then also accept BlockedTensor.
        if transformers_orig_is_tensor(x):
            return True
        if isinstance(x, BlockedTensor):
            return True
        return False

    transformers.file_utils.is_tensor = is_tensor

except ImportError:
    # transformers not installed: nothing to patch.
    pass
def block(model):
    """Convert the params of every submodule that supports blocking
    (exposes ``maybe_block_params``) to the TPP blocked layout."""
    blockable = (m for m in model.modules() if hasattr(m, "maybe_block_params"))
    for mod in blockable:
        mod.maybe_block_params()
def fast_bert(model, dtype=torch.float, optimizer=None, unpad=False):
    r"""
    Use TPP to speedup training/inference. fast_bert API is still an experimental
    feature and now only optimized for bert model.

    Args:
        model (torch.nn.Module): User model to apply optimizations on.
        dtype (torch.dtype): Only works for ``torch.bfloat16`` and ``torch.float`` .
            The default value is torch.float.
        optimizer (torch.optim.Optimizer): User optimizer to apply optimizations
            on, such as SGD. The default value is ``None``, meaning inference case.
        unpad (bool): Unpad the squence to reduce the sparsity.

    Returns:
        The optimized model when ``optimizer`` is ``None``, otherwise a
        ``(model, optimizer)`` pair.

    .. warning::

        Please invoke ``fast_bert`` function AFTER loading weights to model via
        ``model.load_state_dict(torch.load(PATH))``.

    .. warning::

        This API can't be used when you have applied the ipex.optimize.

    .. warning::

        Please invoke ``optimize`` function BEFORE invoking DDP in distributed
        training scenario.

    Examples:

        >>> # bfloat16 inference case.
        >>> model = ...
        >>> model.load_state_dict(torch.load(PATH))
        >>> model.eval()
        >>> optimized_model = ipex.fast_bert(model, dtype=torch.bfloat16)
        >>> # running evaluation step.
        >>> # bfloat16 training case.
        >>> optimizer = ...
        >>> model.train()
        >>> optimized_model, optimized_optimizer = ipex.fast_bert(model, dtype=torch.bfloat16,
                optimizer=optimizer, unpad=True)
        >>> # running training step.
    """
    # TPP bert optimization depends on the transformers repo to implement the
    # related modules; only this version range has been validated.
    installed_pkg = {pkg.key for pkg in pkg_resources.working_set}
    min_version = "4.6.0"
    max_version = "4.20.0"
    if "transformers" not in installed_pkg:
        raise RuntimeError(
            "Please install transformers with a version between {} and {}".format(
                min_version, max_version
            )
        )

    import transformers
    from packaging import version

    trans_version = transformers.__version__
    if version.parse(trans_version) < version.parse(min_version) or version.parse(
        trans_version
    ) > version.parse(max_version):
        raise RuntimeError(
            "Please install transformers with a version between {} and {} (found transformers=={})".format(
                min_version, max_version, trans_version
            )
        )
    # Map stock optimizers to their fused TPP counterparts.
    PT_OPTIMIZER_TO_TPP_OPTIMIZER = {
        torch.optim.AdamW: AdamW,
        transformers.optimization.AdamW: AdamW,
        torch.optim.SGD: SGD,
    }
    if dtype not in (
        torch.float,
        torch.bfloat16,
    ):
        raise ValueError("TPP only supports torch.float and torch.bfloat16.")
    # Seed libxsmm (positive int only); this affects seeded ops, e.g. dropout.
    try:
        torch_ipex_cpp.xsmm_manual_seed(
            torch.tensor(torch.initial_seed()).to(torch.int32).abs().item()
        )
    except BaseException:
        warnings.warn(
            "Set seed failed for libxsmm which may impact the training loss, you can call \
            torch.manual_seed(N) before invoking fast_bert."
        )
    # Replace the original transformers modules with TPP modules that have the
    # same functionality but more operator-fusion optimization.
    # NOTE(review): `dtype` and `unpad` are validated/accepted here but not
    # explicitly propagated to the replacement modules in this code path —
    # presumably they read their configuration from `model.config`; verify.
    new_model = copy.deepcopy(model)
    if isinstance(model, transformers.models.bert.modeling_bert.BertModel):
        assert isinstance(
            new_model.embeddings, transformers.models.bert.modeling_bert.BertEmbeddings
        )
        new_model.embeddings = BertEmbeddings(model.config)
        assert isinstance(
            new_model.encoder, transformers.models.bert.modeling_bert.BertEncoder
        )
        new_model.encoder = BertEncoder(model.config)
    elif hasattr(model, "bert") and isinstance(
        model.bert, transformers.models.bert.modeling_bert.BertModel
    ):
        assert isinstance(
            new_model.bert.embeddings,
            transformers.models.bert.modeling_bert.BertEmbeddings,
        )
        new_model.bert.embeddings = BertEmbeddings(model.bert.config)
        assert isinstance(
            new_model.bert.encoder, transformers.models.bert.modeling_bert.BertEncoder
        )
        new_model.bert.encoder = BertEncoder(model.bert.config)
    else:
        warnings.warn(
            "fast_bert only supports instance of transformers.models.bert.modeling_bert.BertModel"
        )
        # NOTE(review): this early exit returns a (model, optimizer) pair even
        # when optimizer is None, while the success path returns the bare
        # model in that case — kept as-is for backward compatibility.
        return model, optimizer
    new_model.load_state_dict(
        model.state_dict()
    )  # copy the original params into the tpp module
    block(new_model)  # get block format weights/bias
    if optimizer is None:
        return new_model
    # Replace the original pytorch/transformers optimizer with the TPP
    # optimizer for SGD/AdamW: keep the original optimizer state and swap the
    # params for the blocked TPP params.
    param_pair = {}
    for param_ori, param_tpp in zip(model.parameters(), new_model.parameters()):
        param_pair[param_ori] = param_tpp
    if type(optimizer) not in PT_OPTIMIZER_TO_TPP_OPTIMIZER:
        warnings.warn(
            "Still return the origin optimize, the fast_bert can only replace the SGD, AdamW optimizer"
        )
        new_optimizer = optimizer
    else:
        new_optimizer = PT_OPTIMIZER_TO_TPP_OPTIMIZER[type(optimizer)]([{"params": []}])
        new_optimizer.state = optimizer.state
        new_optimizer.param_groups = optimizer.param_groups
    for group in new_optimizer.param_groups:
        for i, p in enumerate(group["params"]):
            if p in param_pair:
                group["params"][i] = param_pair[p]
    return new_model, new_optimizer
| 48,905 | 35.388393 | 128 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/cpu/tpp/optim.py | import math
from typing import Callable, Iterable, Tuple
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
import intel_extension_for_pytorch._C as ipex_cpp
class SGD(Optimizer):
    r"""Implements low precision stochastic gradient descent with extra state.

    bfloat16 parameters keep a ``low_bits`` state tensor holding the lower 16
    bits of the fp32 master value, so updates applied through
    ``ipex_cpp.tpp_bf16_split_add_`` do not lose precision ("split SGD").
    Momentum and weight decay are not supported (must be 0).
    """

    def __init__(
        self,
        params,
        lr=required,
        momentum=0,
        dampening=0,
        weight_decay=0,
        nesterov=False,
    ):
        # The split update relies on the fused TPP kernel; fail early if the
        # C extension does not provide it. (BUGFIX: this used to call an
        # undefined `is_available()`, which raised NameError on construction.)
        if not hasattr(ipex_cpp, "tpp_bf16_split_add_"):
            raise ValueError("Module function 'bf16_update' not available for SplitSGD")
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum != 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay != 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(
            lr=lr,
            momentum=momentum,
            dampening=dampening,
            weight_decay=weight_decay,
            nesterov=nesterov,
        )
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        # BUGFIX: the class was renamed from SplitSGD to SGD but the explicit
        # super(SplitSGD, self) calls were left behind, raising NameError.
        super(SGD, self).__init__(params, defaults)
        print("Using SplitSGD")

    def __setstate__(self, state):
        super(SGD, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault("nesterov", False)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group["params"]:
                if p.grad is None:
                    continue
                d_p = p.grad.data
                if p.dtype == torch.bfloat16:
                    # Lazily create the low-bits companion tensor for the
                    # split-precision update.
                    param_state = self.state[p]
                    if "low_bits" not in param_state:
                        buf = param_state["low_bits"] = torch.zeros_like(
                            p.data, dtype=torch.short
                        )
                    else:
                        buf = param_state["low_bits"]
                    # Fused bf16 update: (p, low_bits) together act as fp32.
                    ipex_cpp.tpp_bf16_split_add_(p.data, buf, d_p, -group["lr"])
                else:
                    if d_p.is_sparse:
                        ipex_cpp.tpp_dense_sparse_add(p.data, d_p, -group["lr"])
                    else:
                        p.data.add_(d_p, alpha=-group["lr"])
        return loss
class AdamW(Optimizer):
    """
    Implements Adam algorithm with weight decay fix as introduced in `Decoupled Weight Decay Regularization
    <https://arxiv.org/abs/1711.05101>`__.

    The moment updates, decoupled weight decay, and parameter update are all
    performed inside the fused TPP kernels ``ipex_cpp.tpp_fused_adamw`` /
    ``tpp_fused_split_adamw``; only the bias-corrected step size is computed
    in Python. bfloat16 params keep a ``low_bits`` tensor so the pair
    (param, low_bits) updates at fp32 precision.

    Parameters:
        params (:obj:`Iterable[torch.nn.parameter.Parameter]`):
            Iterable of parameters to optimize or dictionaries defining parameter groups.
        lr (:obj:`float`, `optional`, defaults to 1e-3):
            The learning rate to use.
        betas (:obj:`Tuple[float,float]`, `optional`, defaults to (0.9, 0.999)):
            Adam's betas parameters (b1, b2).
        eps (:obj:`float`, `optional`, defaults to 1e-6):
            Adam's epsilon for numerical stability.
        weight_decay (:obj:`float`, `optional`, defaults to 0):
            Decoupled weight decay to apply.
        correct_bias (:obj:`bool`, `optional`, defaults to `True`):
            Whether or not to correct bias in Adam (for instance, in Bert TF repository they use :obj:`False`).
    """

    def __init__(
        self,
        params: Iterable[torch.nn.parameter.Parameter],
        lr: float = 1e-3,
        betas: Tuple[float, float] = (0.9, 0.999),
        eps: float = 1e-6,
        weight_decay: float = 0.0,
        correct_bias: bool = True,
    ):
        if lr < 0.0:
            raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(
                "Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[0])
            )
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(
                "Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[1])
            )
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(eps))
        defaults = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            correct_bias=correct_bias,
        )
        super().__init__(params, defaults)

    def step(self, closure: Callable = None):
        """
        Performs a single optimization step.

        Arguments:
            closure (:obj:`Callable`, `optional`): A closure that reevaluates the model and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group["params"]:
                if p.grad is None:
                    continue
                grad = p.grad.data
                data = p.data
                if grad.is_sparse:
                    raise RuntimeError(
                        "Adam does not support sparse gradients, please consider SparseAdam instead"
                    )
                # bfloat8 params (when the torch build exposes torch.bfloat8)
                # are updated through an fp32 master copy.
                if hasattr(torch, "bfloat8") and p.data.dtype == torch.bfloat8:
                    data = data.to(torch.float)
                    grad = grad.to(torch.float)

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state["step"] = 0
                    # Exponential moving average of gradient values
                    state["exp_avg"] = torch.zeros_like(data)
                    # Exponential moving average of squared gradient values
                    state["exp_avg_sq"] = torch.zeros_like(data)
                    # Lower bits for bf16 params
                    if p.data.dtype == torch.bfloat16:
                        state["low_bits"] = torch.zeros_like(p.data)
                    elif hasattr(torch, "bfloat8") and p.data.dtype == torch.bfloat8:
                        # fp32 master copy; the bf8 param mirrors it below.
                        state["master_copy"] = data
                if hasattr(torch, "bfloat8") and p.data.dtype == torch.bfloat8:
                    data = state["master_copy"]
                exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
                if data.dtype == torch.bfloat16:
                    low_bits = state["low_bits"]
                beta1, beta2 = group["betas"]
                state["step"] += 1

                # Only the bias-corrected step size is computed here; the
                # moment updates happen inside the fused kernels.
                step_size = group["lr"]
                if group["correct_bias"]:  # No bias correction for Bert
                    bias_correction1 = 1.0 - beta1 ** state["step"]
                    bias_correction2 = 1.0 - beta2 ** state["step"]
                    step_size = (
                        step_size * math.sqrt(bias_correction2) / bias_correction1
                    )
                # Decoupled (AdamW-style) weight decay is applied inside the
                # kernels via group["weight_decay"].
                if data.dtype == torch.bfloat16:
                    # Split update: (data, low_bits) together hold fp32 precision.
                    ipex_cpp.tpp_fused_split_adamw(
                        data,
                        low_bits,
                        grad.contiguous(),
                        exp_avg,
                        exp_avg_sq,
                        beta1,
                        beta2,
                        step_size,
                        group["lr"],
                        group["weight_decay"],
                        group["eps"],
                    )
                else:
                    ipex_cpp.tpp_fused_adamw(
                        data,
                        grad.contiguous(),
                        exp_avg,
                        exp_avg_sq,
                        beta1,
                        beta2,
                        step_size,
                        group["lr"],
                        group["weight_decay"],
                        group["eps"],
                    )
                if hasattr(torch, "bfloat8") and p.data.dtype == torch.bfloat8:
                    # Write the updated master copy back into the bf8 param.
                    p.data.copy_(state["master_copy"].to(torch.bfloat8))
        return loss
def clip_grad_norm_(parameters, max_norm, norm_type=2, grad_list=False):
    r"""Clips gradient norm of an iterable of parameters.

    The norm is computed over all gradients together, as if they were
    concatenated into a single vector. Gradients are modified in-place.

    Arguments:
        parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
            single Tensor. With ``grad_list=False`` these are parameters whose
            ``.grad`` will be normalized; with ``grad_list=True`` they are the
            gradient tensors themselves.
        max_norm (float or int): max norm of the gradients
        norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
            infinity norm. The L2 case is delegated to the fused TPP kernel.
        grad_list (bool): interpret ``parameters`` as gradients directly.

    Returns:
        Total norm of the parameters (viewed as a single vector).
    """
    from torch import inf

    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    if grad_list:
        grads = parameters
    else:
        parameters = [p for p in parameters if p.grad is not None]
        grads = [p.grad.detach() for p in parameters]
    max_norm = float(max_norm)
    norm_type = float(norm_type)
    if len(parameters) == 0:
        return torch.tensor(0.0)
    device = grads[0].device
    if norm_type == 2:
        # Fast path: the fused kernel computes the norm and clips in place.
        return torch.tensor(ipex_cpp.tpp_clip_grad_norm(grads, max_norm))
    if norm_type == inf:
        total_norm = max(g.detach().abs().max().to(device) for g in grads)
    else:
        per_grad_norms = [torch.norm(g.detach(), norm_type).to(device) for g in grads]
        total_norm = torch.norm(torch.stack(per_grad_norms), norm_type)
    clip_coef = max_norm / (total_norm + 1e-6)
    if clip_coef < 1:
        for g in grads:
            g.detach().mul_(clip_coef.to(g.device))
    return total_norm
class Lamb(Optimizer):
    r"""Implements Lamb algorithm.

    It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.

    The per-parameter update (moments, trust ratio, weight decay) is fused in
    the TPP kernels ``ipex_cpp.tpp_fused_lamb`` / ``tpp_fused_split_lamb``.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        adam (bool, optional): always use trust ratio = 1, which turns this into
            Adam. Useful for comparison purposes.

    .. _Large Batch Optimization for Deep Learning: Training BERT in 76 minutes:
        https://arxiv.org/abs/1904.00962
    """

    def __init__(
        self,
        params: Iterable[torch.nn.parameter.Parameter],
        lr: float = 1e-3,
        betas: Tuple[float, float] = (0.9, 0.999),
        eps: float = 1e-6,
        weight_decay: float = 0.0,
        adam: bool = False,
        correct_bias: bool = True,
    ):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            correct_bias=correct_bias,
        )
        self.adam = adam
        super(Lamb, self).__init__(params, defaults)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group["params"]:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError(
                        "Lamb does not support sparse gradients, consider SparseAdam instead."
                    )

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state["step"] = 0
                    # Exponential moving average of gradient values
                    state["exp_avg"] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state["exp_avg_sq"] = torch.zeros_like(p.data)
                    # Lower bits for bf16 params
                    if p.data.dtype == torch.bfloat16:
                        state["low_bits"] = torch.zeros_like(p.data)
                    # -1.0 signals "not yet computed" to the fused kernel.
                    state["weight_norm"] = -1.0

                exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
                beta1, beta2 = group["betas"]
                weight_norm = state["weight_norm"]
                state["step"] += 1
                if p.data.dtype == torch.bfloat16:
                    # BUGFIX: an `AssertionError(False, "BF16 LAMB optimizer not
                    # implemented yet!")` was constructed here without being
                    # raised — a no-op. Removed; if the bf16 path is in fact
                    # unsupported this should `raise NotImplementedError`.
                    low_bits = state["low_bits"]
                    # Split update: (p, low_bits) together hold fp32 precision.
                    state["weight_norm"] = ipex_cpp.tpp_fused_split_lamb(
                        p.data,
                        low_bits,
                        grad.contiguous(),
                        exp_avg,
                        exp_avg_sq,
                        beta1,
                        beta2,
                        weight_norm,
                        group["lr"],
                        group["weight_decay"],
                        group["eps"],
                    )
                else:
                    state["weight_norm"] = ipex_cpp.tpp_fused_lamb(
                        p.data,
                        grad.contiguous(),
                        exp_avg,
                        exp_avg_sq,
                        beta1,
                        beta2,
                        weight_norm,
                        group["lr"],
                        group["weight_decay"],
                        group["eps"],
                    )
        return loss
class DistLamb(Optimizer):
r"""Implements Lamb algorithm.
It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
adam (bool, optional): always use trust ratio = 1, which turns this into
Adam. Useful for comparison purposes.
.. _Large Batch Optimization for Deep Learning: Training BERT in 76 minutes:
https://arxiv.org/abs/1904.00962
"""
def __init__(
self,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-6,
weight_decay=0,
adam=False,
bias_correction=True,
block_size=1024,
perform_allreduce=False,
fused_param_norm=True,
):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
self.adam = adam
self.block_size = block_size
self.bias_correction = bias_correction
self.perform_allreduce = perform_allreduce
self.distributed = (
torch.distributed.is_initialized()
and torch.distributed.get_world_size() > 1
)
self.fused_param_norm = fused_param_norm
self._acc_steps = 0
self._one_time_setup_done = False
super(DistLamb, self).__init__(params, defaults)
class FlatBuffer:
def __init__(self, param_list, group, dtype, block_size):
self.param_list = param_list
self.group = group
self.dtype = dtype
self.block_size = block_size
p_i = 0
total_size = 0
size_array = []
padded_size_array = []
offset_array = [0]
cur_offset = 0
model_params = []
block2param = []
block_sizes = []
for p in self.param_list:
sz = p.data.numel()
aligned_blocks = (sz + self.block_size - 1) // self.block_size
aligned_sz = aligned_blocks * self.block_size
block2param += [p_i] * aligned_blocks
block_sizes += [self.block_size] * aligned_blocks
block_sizes[-1] = (
self.block_size
if sz % self.block_size == 0
else sz % self.block_size
)
size_array.append(sz)
padded_size_array.append(aligned_sz)
cur_offset += aligned_sz
offset_array.append(cur_offset)
total_size += aligned_sz
p_i += 1
self._flat_w = torch.zeros([total_size], dtype=dtype)
self._flat_g = torch.zeros([total_size], dtype=dtype)
self._flat_m = torch.zeros([total_size], dtype=dtype)
self._flat_v = torch.zeros([total_size], dtype=dtype)
self._flat_u = torch.zeros([total_size], dtype=dtype)
self._flat_ag = torch.zeros([total_size], dtype=dtype)
if dtype == torch.bfloat16:
self._flat_wl = torch.zeros([total_size], dtype=dtype)
else:
self._flat_wl = torch.zeros([0])
self._param_sizes = torch.tensor(size_array, dtype=torch.long)
self._param_sizes_padded = torch.tensor(padded_size_array, dtype=torch.long)
self._offsets = torch.tensor(offset_array, dtype=torch.long)
self._block2param = torch.tensor(block2param, dtype=torch.int)
self._block_sizes = torch.tensor(block_sizes, dtype=torch.int)
self._weight_norms = torch.zeros(
[len(self.param_list) + 1], dtype=torch.double
)
self._update_norms = torch.zeros_like(self._weight_norms)
for i, p in enumerate(self.param_list):
s = offset_array[i]
e = offset_array[i] + size_array[i]
p.data = self._flat_w[s:e].view_as(p.data).copy_(p.data)
if p.grad is None:
p.grad = self._flat_g[s:e].view_as(p.data)
else:
p.grad = self._flat_g[s:e].view_as(p.data).copy_(p.grad.data)
def _one_time_setup(self):
if self._one_time_setup_done is True:
return
from collections import defaultdict
self.flat_params = []
for group in self.param_groups:
model_params = defaultdict(list)
for p in group["params"]:
# torch.distributed.broadcast(p, 0)
if not p.requires_grad:
continue
dt = p.dtype
model_params[dt].append(p)
for dt, param_list in model_params.items():
flat_buf = self.FlatBuffer(param_list, group, dt, self.block_size)
self.flat_params.append(flat_buf)
self._step = 0
self._acc_steps = 0
self._one_time_setup_done = True
def clip_grad_norm_(self, max_norm, norm_type=2):
if hasattr(self, "flat_params"):
grads = [fp._flat_g for fp in self.flat_params]
else:
grads = [p.grad for group in self.param_groups for p in group["params"]]
return clip_grad_norm_(grads, max_norm, norm_type, grad_list=True)
def sync_params(self):
if not self.distributed:
return
if hasattr(self, "flat_params"):
for fp in self.flat_params:
torch.distributed.broadcase(fp._flat_w.data, 0)
else:
for group in self.param_groups:
for p in group["params"]:
torch.distributed.broadcase(p.data, 0)
def sync_grads(self):
if not self.distributed:
return
acc_steps = self.merge_acc_grad(avg=False)
world_size = torch.distributed.get_world_size() * acc_steps
if hasattr(self, "flat_params"):
for fp in self.flat_params:
fp._flat_g.div_(world_size)
# if torch.distributed.get_rank() == 0: print(f"{fp._flat_g.dtype} - {fp._flat_g.shape}")
torch.distributed.all_reduce(fp._flat_g)
# splts = fp._flat_g.split(2*1024*1024)
# for s in splts:
# torch.distributed.all_reduce(s)
else:
for group in self.param_groups:
for p in group["params"]:
p.grad.data.div_(world_size)
torch.distributed.all_reduce(p.grad.data)
def acc_and_zero_grad(self):
self._one_time_setup()
if hasattr(self, "flat_params"):
for fp in self.flat_params:
fp._flat_ag.add_(fp._flat_g)
fp._flat_g.zero_()
self._acc_steps += 1
else:
raise NotImplementedError
def merge_acc_grad(self, avg=True):
if self._acc_steps == 0:
return 1
total_acc_steps = self._acc_steps + 1
if hasattr(self, "flat_params"):
for fp in self.flat_params:
fp._flat_g.add_(fp._flat_ag)
if avg:
fp._flat_g.div_(total_acc_steps)
fp._flat_ag.zero_()
self._acc_steps = 0
return 1 if avg else total_acc_steps
else:
raise NotImplementedError
def zero_grad(self):
if hasattr(self, "flat_params"):
for fp in self.flat_params:
fp._flat_g.zero_()
else:
super(DistLamb, self).zero_grad()
    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
            and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        self._one_time_setup()
        self._step += 1
        # Record the step count in optimizer state, keyed by the first param
        # (presumably for LR schedulers / checkpointing) — TODO confirm consumer.
        self.state[self.param_groups[0]["params"][0]]["step"] = self._step
        self.merge_acc_grad()
        if self.perform_allreduce:
            self.sync_grads()

        for ii, fp in enumerate(self.flat_params):
            group = fp.group
            beta1, beta2 = group["betas"]
            lr = group["lr"]
            eps = group["eps"]
            weight_decay = group["weight_decay"]
            # Fused LAMB kernel: updates the flat weight/moment buffers in place.
            ipex_cpp.tpp_fused_lamb_v2(
                fp._flat_w,
                fp._flat_g,
                fp._flat_m,
                fp._flat_v,
                fp._flat_u,
                fp._flat_wl,
                fp._offsets,
                fp._block_sizes,
                fp._block2param,
                fp._weight_norms,
                fp._update_norms,
                weight_decay,
                beta1,
                beta2,
                lr,
                eps,
                fp.block_size,
                self._step,
                self.fused_param_norm,
            )
            # if weight_decay > 0.0 and torch.distributed.get_rank() < 2:
            #     print(f"{fp._flat_g.dtype} - {fp._flat_g.shape}")
            # if weight_decay > 0.0:
            #     print(f"XXX {self._step:3d} NORM {ii}: wn: \
            #         {fp._weight_norms[0].sqrt().item():.10f} un: {fp._update_norms[0].sqrt().item():.10f}")

        return loss
| 29,296 | 38.590541 | 111 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/cpu/tpp/utils/blocked_layout.py | import torch
# import math
# from enum import Enum
# from collections import OrderedDict
def _prod(myList):
ret = 1
for x in myList:
if x is None:
return None
ret = ret * x
return ret
def get_vnni_blocking(dtype):
    """Return the VNNI pack factor (elements packed per 32-bit lane) for ``dtype``.

    fp32 -> 1, bf16 -> 2, 8-bit types -> 4.

    Bug fix: the original compared against ``torch.bfloat8``, which is not a
    PyTorch dtype, so any dtype other than fp32/bf16 raised AttributeError
    instead of the intended ValueError. 8-bit integer dtypes now map to 4, and
    a ``bfloat8`` attribute is still honored if some build provides one.
    """
    if dtype == torch.float32:
        return 1
    elif dtype == torch.bfloat16:
        return 2
    elif dtype in (torch.int8, torch.uint8) or dtype == getattr(
        torch, "bfloat8", None
    ):
        return 4
    else:
        raise ValueError(f"Unsupported dtype {dtype}")
class BlockingManager(object):
    """Holds the view/permute recipe that converts a tensor between its plain
    shape and a blocked (tiled) layout, and applies it in both directions.

    ``blocking_factors[i]`` may be ``None`` (dim untouched), an int (dim split
    into ``size // f`` x ``f``), or a list/tuple of factors. ``permute``
    optionally reorders the expanded dims of the blocked view.
    """

    def __init__(self, orig_shape, blocking_factors=None, permute=None):
        dims = len(orig_shape)
        if blocking_factors is None:
            blocking_factors = [None] * dims
        else:
            assert len(blocking_factors) == dims
        self.orig_shape = orig_shape
        view_sizes = []
        for i in range(dims):
            if blocking_factors[i] is None:
                view_sizes.append(orig_shape[i])
            else:
                if isinstance(blocking_factors[i], int):
                    assert orig_shape[i] % blocking_factors[i] == 0, (
                        "Blocking factor doesn't divide dim evenly shape = %s, BF = %s"
                        % (orig_shape, blocking_factors)
                    )
                    view_sizes.append(orig_shape[i] // blocking_factors[i])
                    view_sizes.append(blocking_factors[i])
                elif isinstance(blocking_factors[i], (list, tuple)):
                    total_blocked = _prod(blocking_factors[i])
                    assert orig_shape[i] % total_blocked == 0
                    view_sizes.append(orig_shape[i] // total_blocked)
                    view_sizes.extend(blocking_factors[i])
                else:
                    raise ValueError(
                        "Unsupported blocking factor: %s" % (blocking_factors[i],)
                    )
        back_permute = None
        blocked_shape = view_sizes
        if permute is not None:
            assert isinstance(permute, (list, tuple))
            plen = len(permute)
            assert plen == len(view_sizes)
            assert all([i in permute for i in range(plen)])
            # Invert the permutation for the unblock direction and precompute
            # the resulting blocked shape.
            back_permute = [None] * plen
            blocked_shape = [None] * plen
            for i in range(plen):
                back_permute[permute[i]] = i
                blocked_shape[i] = view_sizes[permute[i]]
        self.permute = permute
        self.back_permute = back_permute
        self.view_shape = view_sizes
        self.blocked_shape = blocked_shape

    def block(self, input):
        # plain -> blocked: split dims via view, then (optionally) permute.
        assert input.shape == self.orig_shape
        output = input.view(self.view_shape)
        if self.permute:
            output = output.permute(self.permute).contiguous()
        return output

    def unblock(self, input):
        # blocked -> plain: inverse permute, then collapse the split dims.
        assert list(input.shape) == self.blocked_shape, "Shapes: %s, %s" % (
            input.shape,
            self.blocked_shape,
        )
        output = input
        if self.back_permute:
            output = output.permute(self.back_permute).contiguous()
        output = output.view(self.orig_shape)
        return output
def get_blocking_signature(plain_layout_str, blocked_layout_str):
    """For each plain-layout dim char, list the positions of that char in the
    blocked layout string (e.g. "ab", "aab" -> [[0, 1], [2]])."""
    signature = []
    for ch in plain_layout_str:
        positions = [pos for pos, blk in enumerate(blocked_layout_str) if blk == ch]
        signature.append(positions)
    return signature
def _get_block_sizes(blocked_shape, blocking_signeture, dim):
return [blocked_shape[d] for d in blocking_signeture[dim]]
def _get_plain_size(blocked_shape, blocking_signeture, dim):
return _prod(_get_block_sizes(blocked_shape, blocking_signeture, dim))
def _get_plain_shape(blocked_shape, blocking_signeture):
return [
_prod([blocked_shape[d] for d in dim_list]) for dim_list in blocking_signeture
]
def _get_permute_list(blocking_signeture):
return [item for sublist in blocking_signeture for item in sublist]
class BlockedTensor(object):
    """Wrapper around a blocked (tiled/permuted) tensor that presents the
    plain (unblocked) shape and dtype to generic torch code.

    ``blocking_signeture`` maps each plain dim to the list of blocked dims
    composing it (see ``get_blocking_signature``). Torch ops dispatched via
    ``__torch_function__`` fall back to the unblocked tensor.
    """

    # As a 1-d tensor holding the blocking signature data.
    blocking_signeture: list

    def __init__(self, data, blocking_signeture=None, plain_dtype=None, **kwargs):
        self._t = torch.as_tensor(data, **kwargs)
        self.blocking_signeture = blocking_signeture
        # Lazily computed caches.
        self.permute_list = None
        self.plain_shape = None
        self.plain_dtype = plain_dtype if plain_dtype else self._t.dtype

    def __repr__(self):
        return "Blocking_signature:\n{}\n\ndata:\n{}".format(
            self.blocking_signeture, self._t
        )

    def get_plain_shape(self, dim=None):
        # Full plain shape (cached), or the size of a single plain dim.
        if dim is None:
            if self.plain_shape:
                return self.plain_shape
            self.plain_shape = torch.Size(
                _get_plain_shape(self._t.shape, self.blocking_signeture)
            )
            return self.plain_shape
        else:
            return self.get_plain_shape()[dim]

    def get_permute_list(self):
        # Cached permutation that restores the plain dim ordering.
        if self.permute_list:
            return self.permute_list
        self.permute_list = _get_permute_list(self.blocking_signeture)
        return self.permute_list

    def get_blocked_dim(self):
        return self._t.dim()

    def get_plain_dim(self):
        return len(self.blocking_signeture)

    def get_plain_size(self, dim):
        plain_shape = self.get_plain_shape()
        return plain_shape[dim]
        # return _get_plain_size(self._t.shape, self.blocking_signeture, dim)

    def get_plain_dtype(self):
        return self.plain_dtype

    def get_block_sizes(self, dim):
        return _get_block_sizes(self._t.shape, self.blocking_signeture, dim)

    def blocked_tensor(self):
        return self._t

    def unblocked_tensor(self):
        # Undo the blocking: permute back, collapse blocks, restore dtype.
        permute_list = self.get_permute_list()
        plain_shape = self.get_plain_shape()
        plain_dtype = self.get_plain_dtype()
        # print("BlockedTensor returning unblocked tensor with shape %s" % (plain_shape,))
        return (
            self._t.permute(permute_list).contiguous().view(plain_shape).to(plain_dtype)
        )

    def get_signature(self):
        return self.blocking_signeture

    def __getitem__(self, key):
        return self.unblocked_tensor().__getitem__(key)

    def __getattr__(self, attr):
        # Emulate a plain tensor for a small whitelist of attributes; anything
        # else is rejected explicitly.
        # print("requiested attr: %s" % attr)
        if attr == "shape":
            return torch.Size(self.get_plain_shape())
        if attr == "dtype":
            return self.get_plain_dtype()
        elif attr == "size":
            return self.get_plain_shape
        elif attr == "dim":
            return self.get_plain_dim
        elif attr == "mean":
            return getattr(self._t, attr)
        elif attr == "detach":
            return getattr(self.unblocked_tensor(), attr)
        elif attr == "view":
            return getattr(self.unblocked_tensor(), attr)
        # elif hasattr(self._t, attr): return getattr(self._t, attr)
        else:
            raise AttributeError("BlockedTensor doesn't support attr %s" % attr)

    def __torch_function__(self, func, types, args=(), kwargs=None):
        # Any torch op on a BlockedTensor runs on the unblocked tensor.
        if kwargs is None:
            kwargs = {}
        # args = [a._t if hasattr(a, '_t') else a for a in args]
        args = [
            a.unblocked_tensor() if isinstance(a, BlockedTensor) else a for a in args
        ]
        ret = func(*args, **kwargs)
        # return MetadataTensor(ret, metadata=self._metadata)
        return ret
class BlockedParameter(torch.nn.Parameter):
    """``nn.Parameter`` that can switch between a plain and a blocked layout.

    Configure with :meth:`set_blocking_param` before calling :meth:`block`;
    ``blocking_param`` is ``(blocking_factors, permute[, blocked_dtype])`` as
    accepted by :class:`BlockingManager`.
    """

    def __new__(cls, data=None, requires_grad=True):
        return super(BlockedParameter, cls).__new__(
            cls, data=data, requires_grad=requires_grad
        )

    def __init__(self, *args, **kwarg):
        # super(BlockedParameter, self).__init__(*args, **kwarg)
        self.blocked = False
        self.blocking_param = None
        self.blocking_manager = None

    def set_blocking_param(self, blocking_param):
        self.blocking_param = blocking_param

    def is_blocked(self):
        return self.blocked

    def block(self):
        # Convert data (and grad) to the blocked layout; no-op when already
        # blocked or when no blocking_param was configured.
        if self.blocked:
            return
        if self.blocking_manager is None:
            if self.blocking_param is None:
                return
            self.unblocked_dtype = self.dtype
            # Optional third tuple entry selects a different dtype for the
            # blocked representation.
            self.blocked_dtype = (
                self.blocking_param[2] if len(self.blocking_param) > 2 else self.dtype
            )
            self.blocking_manager = BlockingManager(
                self.data.shape,
                blocking_factors=self.blocking_param[0],
                permute=self.blocking_param[1],
            )
        self.data = self.blocking_manager.block(self.data).to(self.blocked_dtype)
        if self.grad is not None:
            self.grad.data = self.blocking_manager.block(self.grad.data).to(
                self.blocked_dtype
            )
        self.blocked = True

    def unblock(self):
        # Restore the plain layout and original dtype; no-op when not blocked.
        if not self.blocked:
            return
        assert self.blocking_manager is not None
        self.data = self.blocking_manager.unblock(self.data).to(self.unblocked_dtype)
        if self.grad is not None:
            self.grad.data = self.blocking_manager.unblock(self.grad.data).to(
                self.unblocked_dtype
            )
        self.blocked = False
class BlockedModule(torch.nn.Module):
    """``nn.Module`` base that is aware of :class:`BlockedParameter`.

    State dicts are always produced/consumed in the plain (unblocked) layout:
    blocked parameters are temporarily unblocked around save/load and then
    re-blocked.
    """

    def _save_to_state_dict(self, destination, prefix, keep_vars):
        blocked_params = []
        for p in self.parameters(recurse=False):
            if isinstance(p, BlockedParameter) and p.is_blocked():
                p.unblock()
                blocked_params.append(p)
        super(BlockedModule, self)._save_to_state_dict(destination, prefix, keep_vars)
        # Restore the blocked layout after serialization.
        for p in blocked_params:
            p.block()
        if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
            print("_save_to_state_dict Called - %s" % prefix)

    def _load_from_state_dict(
        self,
        state_dict,
        prefix,
        local_metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
    ):
        blocked_params = []
        for p in self.parameters(recurse=False):
            if isinstance(p, BlockedParameter) and p.is_blocked():
                p.unblock()
                blocked_params.append(p)
        super(BlockedModule, self)._load_from_state_dict(
            state_dict,
            prefix,
            local_metadata,
            strict,
            missing_keys,
            unexpected_keys,
            error_msgs,
        )
        # Restore the blocked layout after the plain weights are loaded.
        for p in blocked_params:
            p.block()
        if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
            print("_load_from_state_dict Called - %s" % prefix)

    @staticmethod
    def default_blocking_factors(S):
        # Choose a divisor of S as the block size, preferring the "nice" sizes
        # first, then any even divisor in [12, 62], then odd ones in [11, 63].
        blocking_prio_list = (
            [64, 48, 32, 24, 16] + list(range(62, 11, -2)) + list(range(63, 10, -2))
        )
        for bs in blocking_prio_list:
            if S % bs == 0:
                return [S // bs, bs]
        return [1, S]

    @staticmethod
    def get_blocked_tensor(tensor, signature, blocking_factors=None):
        # Return ``tensor`` in the blocked layout described by ``signature``;
        # an already-blocked input is passed through when its signature matches.
        if isinstance(tensor, BlockedTensor):
            if tensor.get_signature() == signature:
                blocked_tensor = tensor.blocked_tensor()
                # print("Reusing blocked tensor with shape %s" % (blocked_tensor.shape,))
                return blocked_tensor
            else:
                raise TypeError("Blocked tensor signature doesn't match")
        else:
            # print("Converting to blocked tensor with shape %s" % (tensor.shape,))
            dim = tensor.dim()
            assert len(signature) == dim, "Tensor shape doesn't match with signature"
            if blocking_factors is None:
                blocking_factors = [None] * dim
            assert (
                len(blocking_factors) == dim
            ), "Tensor shape doesn't match with blocking_factors"
            view_shape = []
            back_permute = []
            plain_shape = tensor.shape
            for i, dl in enumerate(signature):
                back_permute += dl
                if len(dl) == 1:
                    view_shape.append(plain_shape[i])
                else:
                    nf = len(dl)
                    bf = None
                    if nf == 2:
                        if blocking_factors[i] is None:
                            bf = BlockedModule.default_blocking_factors(plain_shape[i])
                        else:
                            if isinstance(blocking_factors[i], int):
                                bf = [
                                    plain_shape[i] // blocking_factors[i],
                                    blocking_factors[i],
                                ]
                            else:
                                raise ValueError("blocking_factors is not Integer")
                    else:
                        raise ValueError(
                            "Blocking to more than 2 dims not supported yet"
                        )
                    view_shape += bf
            # Invert back_permute to obtain the forward permutation.
            permute = [None] * len(back_permute)
            for i in range(len(back_permute)):
                permute[back_permute[i]] = i
            return tensor.view(view_shape).permute(permute).contiguous()
def block_model_params(model):
    """Invoke ``maybe_block_params`` on every submodule that provides it."""
    for module in model.modules():
        blocker = getattr(module, "maybe_block_params", None)
        if blocker is not None:
            blocker()
class TestModule(BlockedModule):
    """Tiny self-test module exercising BlockedParameter blocking.

    Bug fixes: the original called ``set_blcoking_param`` (typo — the method
    defined on BlockedParameter is ``set_blocking_param``), which raised
    AttributeError at construction; and used ``super(BlockedModule, self)``,
    which skips BlockedModule in the MRO (same runtime effect here since
    BlockedModule defines no ``__init__``, but the wrong idiom).
    """

    def __init__(self):
        super(TestModule, self).__init__()
        self.param1 = BlockedParameter(torch.arange(10.0))
        # Block dim 0 by factor 5 -> view (2, 5), then permute to (5, 2).
        self.param1.set_blocking_param(
            (
                [5],
                [1, 0],
            )
        )
        self.param2 = torch.nn.Parameter(torch.arange(3.0))

    def forward(self):
        print("Shape", self.param1.shape)
        self.param1.block()
        print("Blocked shape", self.param1.shape)
        return self.param1
if __name__ == "__main__":
    # Smoke test: forward() blocks param1; state_dict round trip through
    # tmp.pth (save hooks unblock, so the file stores the plain layout).
    M = TestModule()
    print(list(M.parameters()))
    y = M()
    print(list(M.parameters()))
    y = M()
    # print(list(M.state_dict()))
    torch.save(M.state_dict(), "tmp.pth")
    print(list(M.parameters()))
    # Perturb the weights so the subsequent load visibly restores them.
    M.param1.data = M.param1.data * 2.0
    print(list(M.parameters()))
    state_dict = torch.load("tmp.pth")
    print("state_dict:", state_dict)
    M.load_state_dict(state_dict)
    print(list(M.parameters()))
    print(torch.load("tmp.pth"))
| 14,240 | 33.069378 | 90 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/cpu/utils/_cpu_isa.py | # This Python file uses the following encoding: utf-8
import intel_extension_for_pytorch._isa_help as isa
import sys
def check_avx2_support():
    """Return True when the runtime CPU supports AVX2 (via the IPEX ISA helper)."""
    return isa._check_isa_avx2()
def check_minimal_isa_support():
    """Exit the process with an error message when the CPU lacks AVX2."""
    err_msg = "ERROR! Intel® Extension for PyTorch* only works on machines with instruction sets equal or newer \
than AVX2, which are not detected on the current machine."
    if not check_avx2_support():
        sys.exit(err_msg)
| 454 | 27.4375 | 113 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/cpu/utils/linear_bn_folding.py | import torch.nn as nn
import torch.fx as fx
import torch.fx.experimental.optimization as optimization
from torch.nn.utils.fusion import fuse_linear_bn_eval
import copy
def linear_bn_fuse(model: nn.Module, inplace=False) -> nn.Module:
    """Fold BatchNorm(1d/2d/3d) layers that directly follow a Linear layer
    into the Linear weights (eval-mode fusion) via torch.fx tracing.

    Returns a new GraphModule; ``inplace=False`` deep-copies the model first.
    """
    # implementation follows https://github.com/pytorch/pytorch/blob/master/torch/fx/experimental/optimization.py#L50
    patterns = [
        (nn.Linear, nn.BatchNorm1d),
        (nn.Linear, nn.BatchNorm2d),
        (nn.Linear, nn.BatchNorm3d),
    ]
    if not inplace:
        model = copy.deepcopy(model)
    fx_model = fx.symbolic_trace(model)
    modules = dict(fx_model.named_modules())
    new_graph = copy.deepcopy(fx_model.graph)
    for pattern in patterns:
        for node in new_graph.nodes:
            if optimization.matches_module_pattern(pattern, node, modules):
                if len(node.args[0].users) > 1:
                    # The Linear output feeds other consumers too; folding BN
                    # into it would change those values.
                    continue
                linear = modules[node.args[0].target]
                bn = modules[node.target]
                if not bn.track_running_stats:
                    # No running stats available to fold.
                    continue
                fused_linear = fuse_linear_bn_eval(linear, bn)
                optimization.replace_node_module(node.args[0], modules, fused_linear)
                # Re-route BN's consumers to the fused Linear and drop the BN node.
                node.replace_all_uses_with(node.args[0])
                new_graph.erase_node(node)
    return fx.GraphModule(fx_model, new_graph)
| 1,366 | 35.945946 | 117 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/cpu/utils/_custom_fx_tracer.py | import torch
import torch.fx as fx
import types
def override_is_leaf_module():
    """Patch ``torch.fx.Tracer.is_leaf_module`` so IPEX modules
    (``intel_extension_for_pytorch.nn.*``) are treated as leaves and not
    traced into.
    """
    fx_tracer = fx.Tracer
    orig_is_leaf_module_fn = fx_tracer.is_leaf_module

    def ipex_is_leaf_module_fn(
        self, m: torch.nn.Module, module_qualified_name: str
    ) -> bool:
        is_ipex = m.__module__.startswith("intel_extension_for_pytorch.nn")
        return is_ipex or orig_is_leaf_module_fn(self, m, module_qualified_name)

    # NOTE(review): MethodType binds to the Tracer *class* here, so the patched
    # function always receives the class object as ``self`` — confirm intended.
    fx_tracer.is_leaf_module = types.MethodType(ipex_is_leaf_module_fn, fx_tracer)


# Apply the patch at import time.
override_is_leaf_module()
| 539 | 26 | 82 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/cpu/utils/verbose.py | import torch
import intel_extension_for_pytorch._C as core
# Verbose levels passed to the oneDNN verbose setters below.
VERBOSE_OFF = 0
VERBOSE_ON = 1
VERBOSE_ON_CREATION = 2
class verbose(object):
    """
    On-demand oneDNN verbosing functionality
    To make it easier to debug performance issues, oneDNN can dump verbose
    messages containing information like kernel size, input data size and
    execution duration while executing the kernel. The verbosing functionality
    can be invoked via an environment variable named `DNNL_VERBOSE`. However,
    this methodology dumps messages in all steps. Those are a large amount of
    verbose messages. Moreover, for investigating the performance issues,
    generally taking verbose messages for one single iteration is enough.
    This on-demand verbosing functionality makes it possible to control scope
    for verbose message dumping. In the following example, verbose messages
    will be dumped out for the second inference only.
    .. highlight:: python
    .. code-block:: python
        import intel_extension_for_pytorch as ipex
        model(data)
        with ipex.verbose(ipex.verbose.VERBOSE_ON):
            model(data)
    Args:
        level: Verbose level
            - ``VERBOSE_OFF``: Disable verbosing
            - ``VERBOSE_ON``:  Enable verbosing
            - ``VERBOSE_ON_CREATION``: Enable verbosing, including oneDNN kernel creation
    :meta public:
    """

    def __init__(self, level):
        self.level = level

    def __enter__(self):
        if self.level == VERBOSE_OFF:
            return
        # Best effort: enable torch's own MKLDNN verbosing too, if available.
        try:
            st = torch._C._verbose.mkldnn_set_verbose(self.level)
            assert bool(
                st
            ), "Failed to set Verbose mode of MKLDNN in PyTorch. Please consider to disable this verbose scope."
        except BaseException:
            pass
        st = core.mkldnn_set_verbose(self.level)
        assert bool(
            st
        ), "Failed to set Verbose mode of MKLDNN in IPEX. Please consider to disable this verbose scope."
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always switch verbosing back off on scope exit.
        core.mkldnn_set_verbose(VERBOSE_OFF)
        try:
            torch._C._verbose.mkldnn_set_verbose(VERBOSE_OFF)
        except BaseException:
            pass
        return False
# Best effort: route torch.backends.mkldnn.verbose through this scoped
# implementation so torch-level and IPEX-level verbosing stay in sync.
try:
    verbose_torch = torch.backends.mkldnn.verbose
    torch.backends.mkldnn.verbose = verbose
except BaseException:
    pass
| 2,400 | 30.592105 | 112 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/cpu/hypertune/conf/config.py | import copy
import os
from pathlib import Path
import ast
import re
import yaml
from schema import Schema, And, Use, Optional, Or, Hook
from .dotdict import DotDict
from ..strategy import STRATEGIES
from intel_extension_for_pytorch.cpu.launch import CPUPoolList
# ### tuning ####
# Defaults applied when the user yaml omits the `tuning` section.
tuning_default = {"strategy": "grid", "max_trials": 100}
def _valid_strategy(data):
    """Lower-case a strategy name and verify it is a supported tuning strategy."""
    normalized = data.lower()
    assert normalized in STRATEGIES, f"Tuning strategy {normalized} is NOT supported"
    return normalized
tuning_schema = Schema(
    {
        Optional("strategy", default="grid"): And(str, Use(_valid_strategy)),
        Optional("max_trials", default=100): int,
    }
)

# ### output_dir ###
output_dir_default = os.getcwd() + "/"
output_dir_schema = Schema(str)

# ### objective ###
# Shape of each @hypertune objective declaration parsed from the program.
objective_schema = Schema(
    {
        "name": str,
        Optional("higher_is_better", default=False): bool,
        Optional("target_val", default=-float("inf")): And(Or(int, float)),
    }
)

# ### hyperparams ###
# ### launcher ###
# default values if not tuning
launcher_hyperparam_default_val = {
    "ncore_per_instance": [-1],
    "ncores_per_instance": [-1],
    "ninstances": [-1],
    "use_all_nodes": [True],
    "use_logical_core": [False],
    "use_logical_cores": [False],
    "disable_numactl": [False],
    "disable_iomp": [False],
    "malloc": ["tc"],
}

# default search spaces if not user-specified
# Enumerate the machine's cores once; used to expand the "all_*_cores" keywords.
cpuinfo = CPUPoolList().pool_all
is_hyperthreading_enabled = len([c for c in cpuinfo if not c.is_physical_core]) > 0
launcher_hyperparam_default_search_space = {
    "hp": [
        "ncore_per_instance",
        "ncores_per_instance",
        "ninstances",
        "use_all_nodes",
        "use_logical_core",
        "use_logical_cores",
        "disable_numactl",
        "disable_iomp",
        "malloc",
    ],
    "ncore_per_instance": "all_logical_cores",
    "ncores_per_instance": "all_logical_cores",
    "ninstances": "all_logical_cores",
    "use_all_nodes": [True, False],
    "use_logical_core": [True, False],
    "use_logical_cores": [True, False],
    "disable_numactl": [True, False],
    "disable_iomp": [True, False],
    "malloc": ["pt", "tc", "je"],
}
def _valid_launcher_schema(key, scope, error):
if isinstance(scope[key], str):
assert scope[key] == "all_physical_cores" or scope[key] == "all_logical_cores"
def input_str_to_list_int(data):
    """Expand the 'all_*_cores' keywords to 1-based cpu-id lists; pass lists through."""
    if isinstance(data, str):
        if data == "all_physical_cores":
            return [core.cpu + 1 for core in cpuinfo if core.is_physical_core]
        if data == "all_logical_cores":
            return [core.cpu + 1 for core in cpuinfo]
    # Any other string falls through and fails this check, as before.
    assert isinstance(data, list)
    return data
# Validates (and normalizes) the `hyperparams.launcher` section: the Hooks run
# _valid_launcher_schema on raw values, then Use(input_str_to_list_int) expands
# the "all_*_cores" keywords into concrete core-count lists.
launcher_schema = Schema(
    {
        "hp": And(list, lambda s: all(isinstance(i, str) for i in s)),
        Hook("ncore_per_instance", handler=_valid_launcher_schema): object,
        Optional("ncore_per_instance", default="all_logical_cores"): And(
            Or(str, list),
            Use(input_str_to_list_int),
            lambda s: all(isinstance(i, int) for i in s),
        ),
        Hook("ncores_per_instance", handler=_valid_launcher_schema): object,
        Optional("ncores_per_instance", default="all_logical_cores"): And(
            Or(str, list),
            Use(input_str_to_list_int),
            lambda s: all(isinstance(i, int) for i in s),
        ),
        Hook("ninstances", handler=_valid_launcher_schema): object,
        Optional("ninstances", default="all_logical_cores"): And(
            Or(str, list),
            Use(input_str_to_list_int),
            lambda s: all(isinstance(i, int) for i in s),
        ),
        Optional(
            "use_all_nodes",
            default=[True, False]
            if len(set([c.node for c in cpuinfo])) > 1
            else [True],
        ): And(list, lambda s: all(isinstance(i, bool) for i in s)),
        Optional(
            "use_logical_core",
            default=[True, False] if is_hyperthreading_enabled else [False],
        ): And(list, lambda s: all(isinstance(i, bool) for i in s)),
        Optional(
            "use_logical_cores",
            default=[True, False] if is_hyperthreading_enabled else [False],
        ): And(list, lambda s: all(isinstance(i, bool) for i in s)),
        Optional("disable_numactl", default=[True, False]): And(
            list, lambda s: all(isinstance(i, bool) for i in s)
        ),
        Optional("disable_iomp", default=[True, False]): And(
            list, lambda s: all(isinstance(i, bool) for i in s)
        ),
        Optional("malloc", default=["pt", "tc", "je"]): And(
            list, lambda s: all(isinstance(i, str) for i in s)
        ),
    }
)

hyperparams_default = {"launcher": launcher_hyperparam_default_search_space}
hyperparams_schema = Schema(
    {
        Optional("launcher"): launcher_schema,
    }
)

# Top-level yaml schema.
schema = Schema(
    {
        # tuning
        Optional("tuning", default=tuning_default): tuning_schema,
        # hyperparams
        Optional("hyperparams", default=hyperparams_default): hyperparams_schema,
        # output_dir
        Optional("output_dir", default=output_dir_default): output_dir_schema,
    }
)

# reference: https://github.com/intel/neural-compressor/blob/15477100cef756\
# e430c8ef8ef79729f0c80c8ce6/neural_compressor/conf/config.py
class Conf(object):
    """Hypertune execution configuration.

    Validates the user yaml against ``schema``, merges it over a fully
    defaulted config, and extracts the ``@hypertune`` objective declarations
    from the target program's source.
    """

    def __init__(self, conf_fpath, program_fpath, program_args):
        assert Path(conf_fpath).exists(), f"{conf_fpath} does not exist"
        # Merge the validated user conf (src) into a defaults-only conf (dst).
        self.execution_conf = DotDict(
            schema.validate(
                self._convert_conf(
                    self._read_conf(conf_fpath), copy.deepcopy(schema.validate(dict()))
                )
            )
        )
        assert Path(program_fpath).exists(), f"{program_fpath} does not exist"
        self.program = program_fpath
        self.program_args = program_args
        self.usr_objectives = self._extract_usr_objectives(self.program)

    def _read_conf(self, conf_fpath):
        # Parse and schema-validate the user yaml; any failure is surfaced as
        # a single "format is not correct" error.
        try:
            with open(conf_fpath, "r") as f:
                content = f.read()
            conf = yaml.safe_load(content)
            validated_conf = schema.validate(conf)
            return validated_conf
        except BaseException:
            raise RuntimeError(
                "The yaml file format is not correct. Please refer to document."
            )

    def _convert_conf(self, src, dst):
        # Merge user conf (src) into the defaulted conf (dst), section by section.
        hyperparam_default_val = {"launcher": launcher_hyperparam_default_val}
        for k in dst:
            if k == "hyperparams":
                dst_hps = set(dst["hyperparams"])
                for tune_x in dst_hps:
                    # case 1: tune {launcher}
                    if tune_x in src["hyperparams"]:
                        for hp in dst["hyperparams"][tune_x]["hp"]:
                            # case 1.1: not tune hp, use hp default val
                            if hp not in src["hyperparams"][tune_x]["hp"]:
                                dst["hyperparams"][tune_x][hp] = hyperparam_default_val[
                                    tune_x
                                ][hp]
                            # case 1.2: tune hp, use default or user defined search space
                            else:
                                dst["hyperparams"][tune_x][hp] = src["hyperparams"][
                                    tune_x
                                ][hp]
                    # case 2: not tune {launcher}
                    else:
                        del dst["hyperparams"][tune_x]
            elif k == "output_dir":
                if src[k] != dst[k]:
                    # Normalize to a directory path and create it if needed.
                    path = os.path.dirname(
                        src[k] if src[k].endswith("/") else src[k] + "/"
                    )
                    if not os.path.exists(path):
                        os.makedirs(path)
                    dst[k] = path
            else:
                dst[k] = src[k]
        return dst

    def _extract_usr_objectives(self, program_fpath):
        # e.g. [{'name': 'latency', 'higher_is_better': False, 'target_val': 0},
        # {'name': 'throughput', 'higher_is_better':True, 'target_val': 100}]
        HYPERTUNE_TOKEN = "@hypertune"

        def _parse_hypertune_token(line):
            # Expect lines of the form: print("@hypertune {...literal dict...}")
            pattern = r'print\("@hypertune (.*?)"\)'
            lineseg = re.search(pattern, line)
            try:
                line = lineseg.group(1)
                objective = ast.literal_eval(line)
                objective = objective_schema.validate(objective)
            except BaseException:
                raise RuntimeError(
                    f"Parsing @hypertune failed for line {line} of {program_fpath} file"
                )
            return objective

        with Path(program_fpath).open("r") as f:
            text = f.read()
        lines = text.splitlines()
        return [_parse_hypertune_token(l) for l in lines if HYPERTUNE_TOKEN in l]
| 8,887 | 33.184615 | 89 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/cpu/hypertune/example/resnet50.py | import torch
import torchvision.models as models
def inference(model, data):
    """Benchmark ``model(data)``: 100 warm-up calls, then 100 timed calls.

    Prints the per-iteration latency in milliseconds after a ``@hypertune``
    header line so the hypertune driver can parse the objective.

    Cleanup vs. original: removed the unused ``output`` assignment and the
    unused ``throughput`` local (which was iterations-per-millisecond anyway).
    """
    with torch.no_grad():
        # warm up
        for _ in range(100):
            model(data)

        # measure
        import time

        measure_iter = 100
        start = time.time()
        for _ in range(measure_iter):
            model(data)
        end = time.time()

        duration = (end - start) * 1000  # total wall time in ms
        latency = duration / measure_iter

        # Add print statement of the form
        # @hypertune {'name': str, 'higher_is_better': bool, 'target_val': int or float}
        print("@hypertune {'name': 'latency (ms)'}")
        # The objective must be printed as a bare int/float on its own line.
        print(latency)
def main(args):
    """Build ResNet-50 in eval mode, apply the requested IPEX optimization
    (fp32 / bf16 autocast / int8 static quantization, optional TorchScript),
    then run the latency benchmark."""
    model = models.resnet50(pretrained=False)
    model.eval()
    data = torch.rand(1, 3, 224, 224)

    import intel_extension_for_pytorch as ipex

    # Channels-last is the preferred CPU layout for conv workloads.
    model = model.to(memory_format=torch.channels_last)
    data = data.to(memory_format=torch.channels_last)
    if args.dtype == "float32":
        model = ipex.optimize(model, dtype=torch.float32)
    elif args.dtype == "bfloat16":
        model = ipex.optimize(model, dtype=torch.bfloat16)
    else:  # int8
        from intel_extension_for_pytorch.quantization import prepare, convert

        qconfig = ipex.quantization.default_static_qconfig
        model = prepare(model, qconfig, example_inputs=data, inplace=False)

        # calibration
        n_iter = 100
        for i in range(n_iter):
            model(data)

        model = convert(model)

    with torch.cpu.amp.autocast(enabled=args.dtype == "bfloat16"):
        if args.torchscript:
            with torch.no_grad():
                model = torch.jit.trace(model, data)
                model = torch.jit.freeze(model)
        inference(model, data)
if __name__ == "__main__":
    # CLI: --dtype selects precision; --torchscript enables trace+freeze.
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--dtype", default="float32", choices=["float32", "bfloat16", "int8"]
    )
    parser.add_argument("--torchscript", default=False, action="store_true")
    main(parser.parse_args())
| 2,247 | 27.455696 | 126 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/utils/channels_last_1d.py | import torch
# This is a work-around to convert 3d tensor to channels last format.
# Theoretically, transpose(permute/view)-contiguous(to)-transpose(permute/view)
# can convert 3d tensor to channels last. However, this formula cannot convert all
# the shapes. It is because tensor suggest_memory_format may be different from
# exact memory format. is_contiguous() shows exact memory format and in c++ code,
# channels last chain is based on suggest_memory_format.
# We test several inputs, find that most of shapes can be converted to channels last,
# except for N1W format. It needs use as_strided to convert to channels last.
def tensor_to_channels_last_1d(t):
    """Return a 3-d tensor (N, C, W) with channels-last-1d strides.

    The generic path inserts a dummy H dim and reuses 4-d ``channels_last``;
    the (1, C, W) and (N, 1, W) shapes need special handling because the
    generic formula does not produce the desired strides for them.
    """
    assert t.dim() == 3
    n, c, w = t.size(0), t.size(1), t.size(2)
    if n == 1 and c != 1:
        # (1, C, W): transpose-contiguous-transpose round trip yields NWC strides.
        return t.transpose(1, -1).contiguous().transpose(1, -1)
    if c == 1:
        # (N, 1, W), including N == 1: set the strides directly.
        return t.as_strided((n, c, w), (c * w, 1, c))
    # General case: (N, C, W) -> (N, C, 1, W) -> channels_last -> (N, C, W).
    t4 = t.view(n, c, 1, w).to(memory_format=torch.channels_last)
    return t4.view(n, c, w)
def to_channels_last_1d(t):
    """Convert a 3-d tensor, or every 3-d Conv1d parameter of a module, to
    channels-last-1d layout; everything else is returned unchanged."""
    scope = torch.nn.Conv1d
    if isinstance(t, torch.nn.Module):
        for module in t.modules():
            if not isinstance(module, scope):
                continue
            for param in module.parameters():
                if param.data.dim() == 3:
                    param.data = tensor_to_channels_last_1d(param.data)
        return t
    return tensor_to_channels_last_1d(t) if t.dim() == 3 else t
def is_contiguous_channels_last_1d(input):
    """Return True when ``input`` is a 3-d tensor whose memory layout matches
    channels-last-1d.

    The check reuses torch's 4-d ``channels_last`` predicate by viewing
    (N, C, W) as (N, C, 1, W). Cleanup vs. original: replaced the Yoda
    condition and the ``if cond: return True else: return False`` pattern
    with direct boolean returns.
    """
    if input.dim() != 3:
        return False
    as_4d = input.view(input.size(0), input.size(1), 1, input.size(2))
    return as_4d.is_contiguous(memory_format=torch.channels_last)
| 1,878 | 35.134615 | 85 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/quantization/_smooth_quant.py | import torch
from torch.ao.quantization import (
UniformQuantizationObserverBase,
HistogramObserver,
PerChannelMinMaxObserver,
)
import copy
class SmoothQuantActivationObserver(UniformQuantizationObserverBase):
    """
    For SmoothQuant, see https://arxiv.org/pdf/2211.10438.pdf
    Activation shape = T * IC (tokens * input channels)
    If smooth_quant_enabled is True (e.g. for nn.Linear module)
    1. Find max(|X_j|) for each IC
    2. Get max(|W_j|) for each IC in weight from the weight observer
    3. Calculate scaling factors for each IC by s_j = (max(|W_j|) ** (1 - alpha)) / (max(|X_j|) ** alpha)
       Note that factors for activation are reciprocals of that for weight
    4. Apply s_j to activation
    5. Find q-params per tensor and return
    If smooth_quant_enabled is False (i.e. for other ops, including functional linear),
    just act as a normal observer
    """

    # As a 1d tensor, not diagonal
    scaling_factors: torch.Tensor

    def __init__(
        self,
        act_observer=None,
        act_ic_observer=None,
        smooth_quant_enabled=False,  # if false, act as a normal observer
        dtype=torch.quint8,
        qscheme=torch.per_tensor_affine,
        reduce_range=False,
        quant_min=None,
        quant_max=None,
        alpha=0.5,
        factory_kwargs=None,
        eps=torch.finfo(torch.float32).eps,
    ) -> None:
        super().__init__(
            dtype=dtype,
            qscheme=qscheme,
            reduce_range=reduce_range,
            quant_min=quant_min,
            quant_max=quant_max,
            factory_kwargs=factory_kwargs,
            eps=eps,
        )
        # Set externally to the paired weight observer; read in
        # calculate_qparams — TODO confirm the wiring site.
        self.weight_obs = None
        if act_ic_observer is None:
            # Default per-input-channel observer over the last axis.
            self.ic_obs = PerChannelMinMaxObserver(
                ch_axis=-1,
                dtype=dtype,
                qscheme=torch.per_channel_affine,
                reduce_range=reduce_range,
                quant_min=quant_min,
                quant_max=quant_max,
                factory_kwargs=factory_kwargs,
                eps=eps,
            )
        else:
            assert isinstance(act_ic_observer, UniformQuantizationObserverBase), (
                f"act_ic_observer should be an instance of UniformQuantizationObserverBase "
                f"or its subclass but got {type(act_ic_observer)}"
            )
            assert hasattr(
                act_ic_observer, "ch_axis"
            ), "act_ic_observer should be a per-channel observer and observe input channel axis"
            self.ic_obs = act_ic_observer
        if act_observer is None:
            # Default per-tensor observer used when SmoothQuant is disabled.
            self.act_obs = HistogramObserver(
                dtype=dtype,
                qscheme=qscheme,
                reduce_range=reduce_range,
                quant_min=quant_min,
                quant_max=quant_max,
                factory_kwargs=factory_kwargs,
                eps=eps,
            )
        else:
            assert isinstance(act_observer, UniformQuantizationObserverBase)
            self.act_obs = act_observer
        # if smooth_quant_enabled is false, this observer acts as
        # a normal per-tensor observer
        self.smooth_quant_enabled = smooth_quant_enabled
        self.alpha = alpha
        # Normally we don't use min_val or max_val here
        # They are for checks, like `_check_observer_has_run`
        self.min_val = self.act_obs.min_val
        self.max_val = self.act_obs.max_val

    def forward(self, x_orig):
        if not self.smooth_quant_enabled:
            return self.act_obs.forward(x_orig)
        # Call per-channel observer on IC to find scaling factor
        return self.ic_obs.forward(x_orig)

    @torch.jit.export
    def calculate_qparams(self):
        if not self.smooth_quant_enabled:
            return self.act_obs.calculate_qparams()
        # Get weight per IC min/max from weight observer
        wei_min_per_ic = self.weight_obs.min_val
        wei_max_per_ic = self.weight_obs.max_val
        act_min_per_ic = self.ic_obs.min_val
        act_max_per_ic = self.ic_obs.max_val
        # +1e-6 guards against zero maxima before the pow/div below.
        x_abs_max_per_ic = (
            torch.max(torch.abs(act_min_per_ic), torch.abs(act_max_per_ic)) + 1e-6
        )
        w_abs_max_per_ic = (
            torch.max(torch.abs(wei_min_per_ic), torch.abs(wei_max_per_ic)) + 1e-6
        )
        # Note: activation's scaling factors are reciprocals of weight's
        self.scaling_factors = torch.pow(w_abs_max_per_ic, 1 - self.alpha) / torch.pow(
            x_abs_max_per_ic, self.alpha
        )
        # Apply scaling factors to each IC's min/max
        act_min_per_ic_new = act_min_per_ic * self.scaling_factors.reshape(
            act_min_per_ic.shape
        )
        act_max_per_ic_new = act_max_per_ic * self.scaling_factors.reshape(
            act_max_per_ic.shape
        )
        # Collapse scaled per-IC ranges into a single per-tensor range.
        min_val_per_tensor = torch.min(act_min_per_ic_new)
        max_val_per_tensor = torch.max(act_max_per_ic_new)
        return self._calculate_qparams(min_val_per_tensor, max_val_per_tensor)

    def get_scaling_factors(self):
        if not self.smooth_quant_enabled:
            return None
        return self.scaling_factors

    def extra_repr(self):
        return "smooth_quant_enabled={}, alpha={}".format(
            self.smooth_quant_enabled, self.alpha
        )
class SmoothQuantWeightObserver(UniformQuantizationObserverBase):
    """Weight observer for SmoothQuant (https://arxiv.org/pdf/2211.10438.pdf).

    Weight shape = OC * IC (output channels * input channels).
    When ``smooth_quant_enabled`` is True (e.g. for nn.Linear modules):
      1. Find max(|W_j|) for each IC
      2. Read max(|X_j|) per IC from the paired activation observer
      3. Compute per-IC factors
         s_j = max(|X_j|)**alpha / max(|W_j|)**(1 - alpha)
         (reciprocals of the activation-side factors)
      4. Multiply the weight by s_j
      5. Return per-OC q-params of the scaled weight
    When ``smooth_quant_enabled`` is False (other ops, including functional
    linear) it behaves as a plain per-OC observer.
    """

    # 1d tensor of per-IC factors (not a diagonal matrix)
    scaling_factors: torch.Tensor
    # Original weight, kept so q-params can be computed after scaling
    w_orig: torch.Tensor

    def __init__(
        self,
        wei_observer=None,
        wei_ic_observer=None,
        smooth_quant_enabled=False,  # if false, act as a normal observer
        dtype=torch.qint8,
        qscheme=torch.per_channel_symmetric,
        reduce_range=False,
        quant_min=None,
        quant_max=None,
        alpha=0.5,
        factory_kwargs=None,
        eps=torch.finfo(torch.float32).eps,
    ) -> None:
        super().__init__(
            dtype=dtype,
            qscheme=qscheme,
            reduce_range=reduce_range,
            quant_min=quant_min,
            quant_max=quant_max,
            factory_kwargs=factory_kwargs,
            eps=eps,
        )
        # Paired activation observer; wired up later by the framework.
        self.act_obs = None
        if wei_observer is not None:
            assert isinstance(wei_observer, UniformQuantizationObserverBase)
            self.oc_obs = wei_observer
        else:
            # ch_axis=0: per output channel, the usual weight quant axis.
            self.oc_obs = PerChannelMinMaxObserver(
                ch_axis=0,
                dtype=dtype,
                qscheme=qscheme,
                reduce_range=reduce_range,
                quant_min=quant_min,
                quant_max=quant_max,
                factory_kwargs=factory_kwargs,
                eps=eps,
            )
        if wei_ic_observer is not None:
            assert isinstance(wei_ic_observer, UniformQuantizationObserverBase), (
                f"wei_ic_observer should be an instance of UniformQuantizationObserverBase "
                f"or its subclass but got {type(wei_ic_observer)}"
            )
            assert hasattr(
                wei_ic_observer, "ch_axis"
            ), "wei_ic_observer should be a per-channel observer and observe input channel axis"
            self.ic_obs = wei_ic_observer
        else:
            # ch_axis=1: per input channel, used for the scaling factors.
            self.ic_obs = PerChannelMinMaxObserver(
                ch_axis=1,
                dtype=dtype,
                qscheme=torch.per_channel_affine,
                reduce_range=reduce_range,
                quant_min=quant_min,
                quant_max=quant_max,
                factory_kwargs=factory_kwargs,
                eps=eps,
            )
        # if smooth_quant_enabled is false, this observer acts as
        # a normal observer
        self.smooth_quant_enabled = smooth_quant_enabled
        self.alpha = alpha
        # Mirrored only so checks like `_check_observer_has_run` still work.
        self.min_val = self.oc_obs.min_val
        self.max_val = self.oc_obs.max_val

    def forward(self, x_orig):
        """Observe the weight tensor and return it unchanged."""
        if not self.smooth_quant_enabled:
            return self.oc_obs.forward(x_orig)
        # Keep an untouched copy; scaling is applied at qparam time.
        self.w_orig = copy.deepcopy(x_orig)
        # Gather per-IC statistics used to derive the scaling factors.
        return self.ic_obs.forward(x_orig)

    @torch.jit.export
    def calculate_qparams(self):
        """Return q-params; per-OC params of the scaled weight when enabled."""
        if not self.smooth_quant_enabled:
            return self.oc_obs.calculate_qparams()
        # Per-IC ranges of activation (paired observer) and weight.
        a_min = self.act_obs.min_val
        a_max = self.act_obs.max_val
        w_min = self.ic_obs.min_val
        w_max = self.ic_obs.max_val
        # Small epsilon keeps the ratios finite for all-zero channels.
        w_absmax = torch.maximum(w_min.abs(), w_max.abs()) + 1e-6
        a_absmax = torch.maximum(a_min.abs(), a_max.abs()) + 1e-6
        # Weight factors are reciprocals of the activation-side factors.
        self.scaling_factors = a_absmax**self.alpha / w_absmax ** (1 - self.alpha)
        # w.shape = [OC, IC]; the factors broadcast along the IC axis.
        scaled_weight = self.w_orig * self.scaling_factors
        # Re-observe the scaled weight per OC and derive q-params from it.
        self.oc_obs.reset_min_max_vals()
        self.oc_obs.forward(scaled_weight)
        return self.oc_obs.calculate_qparams()

    def get_scaling_factors(self):
        """Return the per-IC factors, or None when SmoothQuant is off."""
        if self.smooth_quant_enabled:
            return self.scaling_factors
        return None

    def extra_repr(self):
        """Summary string shown in the module repr."""
        template = "smooth_quant_enabled={}, alpha={}"
        return template.format(self.smooth_quant_enabled, self.alpha)
| 10,525 | 37 | 105 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/quantization/_utils.py | import enum
import json
from collections import OrderedDict
from typing import Callable, Optional
import inspect
import numbers
import torch
import torch.nn as nn
from torch import _VF
import torch.nn.functional as F
from torch.quantization.qconfig import QConfig
from intel_extension_for_pytorch.nn.functional import interaction
from ._quantization_state_utils import QTensorInfo
from ._smooth_quant import SmoothQuantActivationObserver, SmoothQuantWeightObserver
# Binary add variants tracked for quantization fusion.
# TODO: should torch.mul / Tensor.mul be tracked here as well?
add_and_mul_ops = {
    torch.add,
    torch.Tensor.add,
}
# Module types carrying weights that must be observed/quantized.
quantized_modules_has_weights = {
    torch.nn.Conv2d,
    torch.nn.Conv3d,
    torch.nn.Linear,
    torch.nn.EmbeddingBag,
    torch.nn.ConvTranspose2d,
    torch.nn.ConvTranspose3d,
    torch.nn.LSTM,
}
# those ops only support int8->int8, not int8->fp32/bf16
int8_int8_ops = {
    str(op)
    for op in (
        F.adaptive_avg_pool2d,
        F.adaptive_avg_pool3d,
        F.avg_pool2d,
        F.avg_pool3d,
        F.max_pool2d,
        F.max_pool3d,
        nn.MaxPool2d,
        nn.MaxPool3d,
        nn.AvgPool2d,
        nn.AvgPool3d,
        nn.AdaptiveAvgPool2d,
        nn.AdaptiveAvgPool3d,
        torch.Tensor.relu,
        torch.relu,
        F.relu,
        nn.ReLU,
        torch.flatten,
        torch.Tensor.flatten,
        torch.nn.Flatten,
        # the following ops will be supported at a next step:
        # torch.Tensor.sigmoid, torch.sigmoid, F.sigmoid, nn.Sigmoid,
        # F.gelu, nn.GELU,
        # ipex customer op
        interaction,
        torch.ops.torch_ipex.interaction_forward,
        torch.embedding_bag,
        F.embedding_bag,
        torch.nn.EmbeddingBag,
        torch.nn.LSTM,
    )
}
class OpQuantizeabilityType(enum.Enum):
    """Whether an observed op is eligible for quantization."""

    QUANTIZEABLE = 0
    NOT_QUANTIZEABLE = 1
class FuncOutputObsType(enum.Enum):
    """How the output of a function should be observed."""

    # No observer is attached to the output.
    NONE = 0
    # A fresh observer is created for the output.
    NEW_OBS = 1
    # The output shares the observer of the op's first input.
    REUSES_FIRST_INPUT_OBS = 2
# quantizeable modules
def is_leaf(
    m: torch.nn.Module,
) -> bool:
    """Return True if `m` is a torch.nn module that is not a container.

    Leaf modules are the granularity at which quantization hooks attach.
    TODO(future PR): extend to the rest of the container classes.
    """
    if not m.__module__.startswith("torch.nn"):
        return False
    return not isinstance(m, (torch.nn.Sequential, torch.nn.ModuleList))
def get_fqn_valid_for_module_dict_key(fqn: str) -> str:
    """
    Modifies `fqn`(fully qualified name) to make it a valid key to a ModuleDict.

    ModuleDict keys may not contain ".", and the root module's empty fqn is
    mapped to a single space so it stays a usable key.
    """
    key = fqn or " "
    return key.replace(".", ":")
class HookType(enum.Enum):
    """
    Describes the various types of function and module hooks that are used
    to implement quantization syntax transforms.
    """

    # Hooks which are run before, during and after a quantizeable op.
    # Usually used for op input and output observation, substituting
    # quantized kernels, and dynamically looking up arguments to quantized
    # kernels.
    OP_HOOKS = 0
    # Hooks which are run before or after a `torch.nn.Module` which
    # is a non-leaf. Usually used for dtype transforms if the user requests
    # that the inputs or outputs of a certain module are of some dtype.
    MODULE_IO_HOOKS = 1
    # Hooks which are run before a non-quantizeable op which requires
    # `torch.float` inputs. Any inputs which are not floats are converted
    # back to floats.
    ARG_DEQUANTS = 2
    # Everything else.
    # Fixed: this was previously `2`, which made NONE an *alias* of
    # ARG_DEQUANTS (enum members with duplicate values are aliases), so the
    # two hook types could not be distinguished by comparison.
    NONE = 3
def get_torch_function_hook_type(
    parent_module: Optional[torch.nn.Module],
    func: Callable,
) -> HookType:
    """Classify which hook type applies to `func` called under `parent_module`."""
    # Direct __dict__ access avoids the overhead of the default
    # `torch.nn.Module.__getattr__`.
    if parent_module is None or "_auto_quant_state" not in parent_module.__dict__:
        return HookType.NONE
    qstate = parent_module.__dict__["_auto_quant_state"]
    if qstate.cur_op_needs_hooks(func):  # type: ignore[union-attr, operator]
        return HookType.OP_HOOKS
    return HookType.ARG_DEQUANTS
def get_module_hook_type(
    parent_module: Optional[torch.nn.Module],
    cur_module: torch.nn.Module,
) -> HookType:
    """Classify which hook type applies when running `cur_module.forward`.

    The result is memoized on `cur_module` because it is stable per module.
    """
    cached = getattr(cur_module, "_auto_quant_module_hook_type", None)
    if cached is not None:
        return cached
    parent_has_qstate = (
        parent_module is not None and "_auto_quant_state" in parent_module.__dict__
    )
    needs_op_hooks = parent_has_qstate and parent_module.__dict__[
        "_auto_quant_state"
    ].cur_op_needs_hooks(
        cur_module
    )  # type: ignore[union-attr, operator]
    if needs_op_hooks:
        result = HookType.OP_HOOKS
    elif "_auto_quant_state" in cur_module.__dict__:
        # Calling forward on a module that has its own quant state while the
        # parent does not own op hooks for it -> module IO hooks.
        result = HookType.MODULE_IO_HOOKS
    elif parent_has_qstate:
        result = HookType.ARG_DEQUANTS
    else:
        result = HookType.NONE
    cur_module._auto_quant_module_hook_type = result  # type: ignore[assignment]
    return result
def attach_scale_zp_values_to_model(
    module: torch.nn.Module,
) -> None:
    """
    Calculates the scale and zero_point from each observer and attaches
    these values to the parent module. This is done to avoid recalculating
    these values at inference.

    Raises:
        AssertionError: if an observer's dtype is not torch.quint8/torch.qint8.
    """
    if hasattr(module, "_auto_quant_state"):
        qstate: AutoQuantizationState = module._auto_quant_state  # type: ignore[assignment]
        quantized_dtype = [torch.quint8, torch.qint8]
        # Fixed: this used the no-op pattern `AssertionError(False), "msg"`,
        # which builds an exception object without raising it, so invalid
        # observer dtypes were silently ignored.
        for tensor_id, observer in qstate.tensor_id_to_observer.items():
            assert (
                observer.dtype in quantized_dtype
            ), "The observer's dtype only can be torch.quint8 or torch.qint8"
            scale, zp = observer.calculate_qparams()
            qstate.tensor_id_to_scale_zp[int(tensor_id)] = (scale, zp)
        for tensor_id, observer in qstate.weight_tensor_id_to_observer.items():
            assert (
                observer.dtype in quantized_dtype
            ), "The observer's dtype only can be torch.quint8 or torch.qint8"
            scale, zp = observer.calculate_qparams()
            qstate.weight_tensor_id_to_scale_zp[tensor_id] = (scale, zp)
        _attach_smooth_quant_scaling_factor_to_model(module)
        # Observers are no longer needed once scale/zp are cached.
        qstate.tensor_id_to_observer.clear()
        qstate.weight_tensor_id_to_observer.clear()
    for _, child in module.named_children():
        attach_scale_zp_values_to_model(child)
def _check_observer_has_run(observer):
    """Return True if `observer` has recorded at least one min/max value.

    An observer that has never run has either empty min/max tensors or the
    scalar init values (+inf, -inf).
    """
    lo, hi = observer.min_val, observer.max_val
    if lo.numel() == 0 or hi.numel() == 0:
        return False
    scalar_like = lo.dim() == 0 or hi.dim() == 0
    if scalar_like and lo == float("inf") and hi == float("-inf"):
        return False
    return True
def check_model_obsever_has_run(
    module: torch.nn.Module,
) -> bool:
    """
    Check whether the module's observers have been run, by testing whether
    an observer's min_val/max_val still hold their init values.
    Rules:
    - If no observer is found anywhere, return False.
    - The first observer found decides the result for that module.
    - Otherwise recurse into children and return True if any child reports True.

    Raises:
        AssertionError: if an observer's dtype is not torch.quint8/torch.qint8.
    """
    if hasattr(module, "_auto_quant_state"):
        qstate: AutoQuantizationState = module._auto_quant_state  # type: ignore[assignment]
        quantized_dtype = [torch.quint8, torch.qint8]
        # Fixed: these used the no-op pattern `AssertionError(False), "msg"`,
        # which builds an exception object without raising it.
        # Note: the return inside the loop is intentional — the first
        # observer found decides the result for this module.
        for tensor_id, observer in qstate.tensor_id_to_observer.items():
            assert (
                observer.dtype in quantized_dtype
            ), "The observer's dtype only can be torch.quint8 or torch.qint8"
            return _check_observer_has_run(observer)
        for tensor_id, observer in qstate.weight_tensor_id_to_observer.items():
            assert (
                observer.dtype in quantized_dtype
            ), "The observer's dtype only can be torch.quint8 or torch.qint8"
            return _check_observer_has_run(observer)
    for _, child in module.named_children():
        if check_model_obsever_has_run(child):
            return True
    return False
def attach_op_convert_info_to_model(
    module: torch.nn.Module,
) -> None:
    """
    Calculates the info needed to convert each op and attaches
    it to the parent module. This is done to avoid recalculating these values
    at inference.
    """
    if hasattr(module, "_auto_quant_state"):
        qstate: AutoQuantizationState = module._auto_quant_state  # type: ignore[assignment]
        for seen_q_op_info in qstate.idx_to_seen_q_op_infos.values():
            idx = seen_q_op_info.idx
            qstate.idx_to_op_convert_info[idx] = qstate.calculate_op_convert_info(
                seen_q_op_info
            )
            qstate.idx_to_op_weight_convert_info[
                idx
            ] = qstate.calculate_op_weight_convert_info(seen_q_op_info)
        _map_smooth_quant_info_to_idx(module)
    for _, child in module.named_children():
        attach_op_convert_info_to_model(child)
class Node:
    """Lightweight graph node wrapping a seen (non-)quantizeable op info.

    `qconfig is None` marks a non-quantizeable op, in which case the
    quantization-only fields default to empty lists.
    """

    def __init__(
        self,
        op_infos,
        input_scale_zero=None,
        weight_scale_zero=None,
        output_scale_zero=None,
        qconfig=None,
    ):
        quantizeable = qconfig is not None
        # Non-quantizeable op infos carry no idx.
        self.idx = getattr(op_infos, "idx", None)
        self.type = op_infos.type
        self.fqn = op_infos.fqn
        self.input_tensor_infos = op_infos.input_tensor_infos
        self.output_tensor_infos = op_infos.output_tensor_infos
        self.input_tensor_force_inf_dtype = (
            op_infos.input_tensor_force_inf_dtype if quantizeable else []
        )
        self.insert_fake_quant_after_outputs = (
            op_infos.insert_fake_quant_after_outputs if quantizeable else []
        )
        self.weight_tensor_infos = op_infos.weight_tensor_infos if quantizeable else []
        self.qconfig = qconfig
        self.input_scale_zero = input_scale_zero
        self.weight_scale_zero = weight_scale_zero
        self.output_scale_zero = output_scale_zero
        # Filled in when the graph connections are built.
        self.pre_nodes = []
        self.post_nodes = []
from collections import namedtuple

# Marker node representing a parent module's own outputs in the node graph.
ParentNode = namedtuple("ParentNode", ["output_info"])
def convert_quant_state_map_to_nodes(quant_state_map):
    """Build a flat node graph from a fqn -> AutoQuantizationState map.

    Returns a list containing one ParentNode per quant state (its outputs)
    plus one Node per seen quantizeable and non-quantizeable op, with
    pre_nodes/post_nodes wired by matching tensor infos.
    """
    nodes = []
    # step1: create nodes
    for _, v in quant_state_map.items():
        new_parent_node = ParentNode(v.output_qtensor_infos)
        nodes.append(new_parent_node)
        for idx, q_op_info in v.idx_to_seen_q_op_infos.items():
            # Collect the already-computed scale/zp for this op's inputs,
            # weights and outputs, keyed the same way the qstate stores them.
            input_scale_zero = {}
            weight_scale_zero = {}
            output_scale_zero = {}
            for tensor_info in q_op_info.input_tensor_infos:
                if (
                    tensor_info is not None
                    and tensor_info.id in v.tensor_id_to_scale_zp
                ):
                    input_scale_zero[tensor_info.id] = v.tensor_id_to_scale_zp[
                        tensor_info.id
                    ]
            for tensor_info in q_op_info.weight_tensor_infos:
                # Weight entries are keyed by "<op idx>_<tensor id>".
                if tensor_info is not None and (
                    str(idx) + "_" + str(tensor_info.id)
                    in v.weight_tensor_id_to_scale_zp
                ):
                    weight_id = str(idx) + "_" + str(tensor_info.id)
                    weight_scale_zero[weight_id] = v.weight_tensor_id_to_scale_zp[
                        weight_id
                    ]
            for tensor_info in q_op_info.output_tensor_infos:
                if (
                    tensor_info is not None
                    and tensor_info.id in v.tensor_id_to_scale_zp
                ):
                    output_scale_zero[tensor_info.id] = v.tensor_id_to_scale_zp[
                        tensor_info.id
                    ]
            new_node = Node(
                q_op_info,
                input_scale_zero,
                weight_scale_zero,
                output_scale_zero,
                qconfig=q_op_info.qconfig,
            )
            nodes.append(new_node)
        for nonq_op_infos in v.seen_nonq_op_infos:
            new_node = Node(nonq_op_infos)
            nodes.append(new_node)
    # step2: create connections between nodes (O(n^2) pairwise scan;
    # ParentNode entries are skipped, they only carry output infos).
    for cur in nodes:
        if isinstance(cur, ParentNode):
            continue
        for n in nodes:
            if isinstance(n, ParentNode):
                continue
            # find pre_node: another node producing one of cur's inputs
            for input_info in cur.input_tensor_infos:
                if input_info in n.output_tensor_infos:
                    cur.pre_nodes.append(n)
            # find next_node: another node consuming one of cur's outputs
            for output_info in cur.output_tensor_infos:
                if output_info in n.input_tensor_infos:
                    cur.post_nodes.append(n)
    return nodes
def sync_pool_and_lstm_input_output_scale_zp(quant_state_map, nodes):
    """Make pooling/LSTM chains reuse their input's scale/zero_point.

    Pooling and LSTM ops only support int8-in/int8-out with identical
    q-params on both sides. For every chain of pool/shape/rnn ops this walks
    back to the first op of the chain, takes that op's input scale/zp, and
    writes it over the output scale/zp of every op in the chain (mutating
    `quant_state_map` in place).
    """
    pool_ops = [
        str(F.adaptive_avg_pool2d),
        str(F.adaptive_avg_pool3d),
        str(F.avg_pool2d),
        str(F.avg_pool3d),
        str(F.max_pool2d),
        str(F.max_pool3d),
        str(nn.MaxPool2d),
        str(nn.MaxPool3d),
        str(nn.AvgPool2d),
        str(nn.AvgPool3d),
        str(nn.AdaptiveAvgPool2d),
        str(nn.AdaptiveAvgPool3d),
    ]
    # Shape-only ops are transparent for q-params and may sit inside a chain.
    shape_ops = [str(torch.flatten), str(torch.nn.Flatten), str(torch.Tensor.flatten)]
    rnn_ops = [str(torch.nn.LSTM)]

    def _sync_scale_zp_given_id(quant_state_map, id, scale_zp):
        # Overwrite the stored (scale, zp) for tensor `id` in every qstate.
        for _, v in quant_state_map.items():
            if id in v.tensor_id_to_scale_zp:
                v.tensor_id_to_scale_zp[id] = scale_zp

    def _find_sync_op_from_given_node(cur_node, ids):
        # Collect output tensor ids of all pool/shape/rnn ops reachable
        # downstream from `cur_node`.
        for next in cur_node.post_nodes:
            if next.type in (pool_ops + shape_ops + rnn_ops):
                ids.append(next.output_tensor_infos[0].id)
                _find_sync_op_from_given_node(next, ids)

    for node in nodes:
        if isinstance(node, ParentNode):
            continue
        if node.qconfig is not None and node.type in (pool_ops + rnn_ops):
            # Already synced; nothing to do.
            if node.input_scale_zero == node.output_scale_zero:
                continue
            sync_node_begin = node
            # first, find the first op of the chain before the current
            # pooling(lstm) op, e.g.: pooling->pool->shape->cur_node
            while len(sync_node_begin.pre_nodes) == 1 and sync_node_begin.pre_nodes[
                0
            ].type in (pool_ops + shape_ops + rnn_ops):
                sync_node_begin = sync_node_begin.pre_nodes[0]
            tensor_ids = [sync_node_begin.output_tensor_infos[0].id]
            scale_zp = sync_node_begin.input_scale_zero[
                sync_node_begin.input_tensor_infos[0].id
            ]
            # find all ops that need syncing starting from sync_node_begin.
            _find_sync_op_from_given_node(sync_node_begin, tensor_ids)
            for id in tensor_ids:
                _sync_scale_zp_given_id(quant_state_map, id, scale_zp)
def _check_after_nodes_all_quantized_give_node(node):
    r"""
    Check whether all of the given node's post nodes are quantized.

    Returns False when there are no post nodes, when any post node has no
    qconfig (except nn.Identity, which is looked through recursively), or
    when any post node's forced input dtypes are not int8.
    """
    if len(node.post_nodes) == 0:
        return False
    else:
        # make sure all post nodes are quantizable.
        for next in node.post_nodes:
            if next.qconfig is None:
                if next.type == str(nn.Identity):
                    # Identity is transparent; check what comes after it.
                    return _check_after_nodes_all_quantized_give_node(next)
                else:
                    return False
            else:
                # Ops that require symmetric qint8 inputs/weights.
                int8_int8_symmetric_ops = [
                    str(interaction),
                    str(torch.ops.torch_ipex.interaction_forward),
                    str(torch.embedding_bag),
                    str(F.embedding_bag),
                    str(torch.nn.EmbeddingBag),
                ]
                if next.type in int8_int8_symmetric_ops:
                    if next.type in [
                        str(interaction),
                        str(torch.ops.torch_ipex.interaction_forward),
                    ]:
                        # node.input_tensor_infos may be set, we can use force_inf_dtype to check whether this op is quantizabled.
                        for force_inf_dtype in next.input_tensor_force_inf_dtype:
                            if force_inf_dtype != torch.qint8:
                                return False
                    else:
                        # embeddingBag: the weight dtype decides quantization.
                        if next.weight_tensor_infos[0].inf_dtype != torch.qint8:
                            return False
                else:
                    # Generic ops accept either qint8 or quint8 inputs.
                    for force_inf_dtype in next.input_tensor_force_inf_dtype:
                        if force_inf_dtype not in [torch.qint8, torch.quint8]:
                            return False
    # all post nodes are quantizable.
    return True
def set_node_output_quantized(nodes):
    r"""
    For interaction, EmbeddingBag, pooling and elt-wise ops we only support
    INT8->INT8; if such an op has quantized inputs, its output must also get
    a fake quant so it can run as an INT8 kernel. This function checks
    whether the output inf dtype is an int8 dtype when the input is
    quantized; if not, it sets it and also sets
    insert_fake_quant_after_outputs to True.
    """

    def _reset_post_node_input_infos(node):
        # make sure the post node will insert fake quant if we add fake
        # quant after the current node's output
        if len(node.post_nodes) > 0:
            for post_node in node.post_nodes:
                if post_node.qconfig is not None:
                    for idx, tensor_info in enumerate(post_node.input_tensor_infos):
                        if tensor_info in node.output_tensor_infos:
                            post_node.input_tensor_force_inf_dtype[
                                idx
                            ] = tensor_info.orig_dtype
                elif post_node.type == str(nn.Identity):
                    _reset_post_node_input_infos(post_node)

    for node in nodes:
        if isinstance(node, ParentNode):
            continue
        if node.qconfig is not None and node.type in int8_int8_ops:
            post_node_are_quantized = _check_after_nodes_all_quantized_give_node(node)
            # Fixed: was `node.type in str(torch.nn.EmbeddingBag)`, an
            # accidental substring test; an exact comparison is intended
            # (op types are stored as `str(op)`, cf. int8_int8_ops).
            if node.type == str(torch.nn.EmbeddingBag):
                if (
                    node.weight_tensor_infos[0].inf_dtype == torch.qint8
                    and not post_node_are_quantized
                ):
                    node.output_tensor_infos[0].inf_dtype = torch.qint8
                    node.insert_fake_quant_after_outputs[0] = True
                    _reset_post_node_input_infos(node)
            elif node.type == str(F.embedding_bag):
                if (
                    node.input_tensor_force_inf_dtype[1] == torch.qint8
                    and not post_node_are_quantized
                ):
                    node.output_tensor_infos[0].inf_dtype = torch.qint8
                    node.insert_fake_quant_after_outputs[0] = True
                    _reset_post_node_input_infos(node)
            elif node.type in [
                str(interaction),
                str(torch.ops.torch_ipex.interaction_forward),
            ]:
                if (
                    node.input_tensor_force_inf_dtype[0] == torch.qint8
                    and not post_node_are_quantized
                ):
                    node.output_tensor_infos[0].inf_dtype = torch.qint8
                    node.insert_fake_quant_after_outputs[0] = True
                    _reset_post_node_input_infos(node)
            else:
                # TODO: enable PackedSequence input for LSTM.
                # Fixed: was `node.type in [nn.LSTM]`, comparing the string
                # node.type against the class object (always False), which
                # disabled this PackedSequence guard entirely.
                if not (
                    node.type in [str(nn.LSTM)]
                    and len(node.input_tensor_infos) > 2
                    and node.input_tensor_infos[1].orig_dtype == torch.int64
                ):
                    if (
                        node.input_tensor_force_inf_dtype[0]
                        in [torch.qint8, torch.quint8]
                        and not post_node_are_quantized
                    ):
                        node.output_tensor_infos[
                            0
                        ].inf_dtype = node.input_tensor_force_inf_dtype[0]
                        node.insert_fake_quant_after_outputs[0] = True
                        _reset_post_node_input_infos(node)
# Map `str(qscheme)` back to the torch qscheme object (used when loading a
# qconf summary from JSON, where qschemes are stored as strings).
qscheme_dict = {
    str(scheme): scheme
    for scheme in (
        torch.per_tensor_affine,
        torch.per_tensor_symmetric,
        torch.per_channel_affine,
        torch.per_channel_symmetric,
    )
}
# Map `str(dtype)` back to the torch dtype object (JSON round-trip helper).
dtype_dict = {
    str(dt): dt
    for dt in (
        torch.quint8,
        torch.qint8,
        torch.float32,
        torch.float64,
        torch.float16,
        torch.bfloat16,
        torch.complex64,
        torch.complex128,
        torch.int16,
        torch.int32,
        torch.int64,
        torch.bool,
        torch.uint8,
        torch.int8,
        torch.quint4x2,
    )
}
# IPEX-specific observers that `_create_observer` can instantiate by name,
# in addition to the standard `torch.quantization.observer` classes.
IPEX_OBSERVERS = {
    "SmoothQuantActivationObserver": SmoothQuantActivationObserver,
    "SmoothQuantWeightObserver": SmoothQuantWeightObserver,
}
def _get_observer_setting(observer):
    r"""
    Convert a torch observer's args to a dict for saving to a json file.
    Because json only accepts numbers or strings, some class-typed values
    (dtype, qscheme) are converted to strings and a 1-element eps tensor to
    a plain float.
    """
    cfg = OrderedDict()
    cfg["name"] = observer.__class__.__name__
    # Walk the observer's __init__ parameters in signature order.
    params = inspect.signature(observer.__init__).parameters
    attrs = observer.__dict__
    for name in params:
        if name not in attrs:
            continue
        value = attrs[name]
        if isinstance(value, numbers.Number):
            cfg[name] = value
        elif name in ("qscheme", "dtype"):
            cfg[name] = str(value)
        elif (
            name == "eps"
            and hasattr(observer, "eps")
            and isinstance(observer.eps, torch.Tensor)
            and observer.eps.numel() == 1
        ):
            cfg[name] = observer.eps.item()
    return cfg
def _create_observer(setting):
    r"""
    Create torch observer according to user's setting.

    `setting` is a dict produced by `_get_observer_setting` (typically
    loaded back from JSON). Returns a `with_args` partial constructor, not
    an observer instance.

    NOTE: `setting` is mutated in place — "name" is popped and string-encoded
    qscheme/dtype/sub-observer entries are replaced by real objects.
    """
    # Decode string-encoded enum-like values back to torch objects.
    if "qscheme" in setting:
        setting["qscheme"] = qscheme_dict[setting["qscheme"]]
    if "dtype" in setting:
        setting["dtype"] = dtype_dict[setting["dtype"]]
    if hasattr(torch.quantization.observer, setting["name"]):
        observer = getattr(torch.quantization.observer, setting["name"])
        setting.pop("name", None)
        return observer.with_args(**setting)
    elif setting["name"] in IPEX_OBSERVERS:
        observer = IPEX_OBSERVERS[setting["name"]]
        setting.pop("name", None)
        # SmoothQuant observers contain sub-observers; build each one
        # recursively and instantiate it (note the trailing call).
        smooth_quant_sub_obs_keys = [
            "act_observer",
            "act_ic_observer",
            "wei_observer",
            "wei_ic_observer",
        ]
        for key in smooth_quant_sub_obs_keys:
            if key in setting:
                setting[key] = _create_observer(setting[key])()
        return observer.with_args(**setting)
    else:
        raise NameError("torch.quantization.observer %s not found" % setting["name"])
def save_quant_state(quant_state_map, configure_file):
    """Serialize observed q-params as a JSON file for tuning.

    For every quant state the output records the quantizeable op infos
    (including scale/zero_point and SmoothQuant scaling factors where
    present, plus the observer settings), the non-quantizeable op infos, and
    the layer output infos. When `configure_file` is None nothing is written.
    """
    quant_state_dict = OrderedDict()
    for k, v in quant_state_map.items():
        layer_infos = OrderedDict()
        if len(v.idx_to_seen_q_op_infos) == 0:
            layer_infos["q_op_infos"] = {}
        else:
            q_op_infos = OrderedDict()
            for q_k, op_info in v.idx_to_seen_q_op_infos.items():
                info = OrderedDict()
                info["op_type"] = op_info.type
                info["op_type_is_module"] = op_info.type_is_module
                info["fqn"] = op_info.fqn
                # input infos
                input_tensor_infos = []
                smooth_quant_enabled = False
                for tensor_info, force_dtype in zip(
                    op_info.input_tensor_infos, op_info.input_tensor_force_inf_dtype
                ):
                    cur_tensor_infos = {}
                    if tensor_info is not None:
                        cur_tensor_infos["id"] = tensor_info.id
                        cur_tensor_infos["orig_dtype"] = str(tensor_info.orig_dtype)
                        cur_tensor_infos["inf_dtype"] = str(tensor_info.inf_dtype)
                        cur_tensor_infos["force_dtype"] = str(force_dtype)
                        if tensor_info.id in v.tensor_id_to_scale_zp:
                            cur_tensor_infos["scale"] = v.tensor_id_to_scale_zp[
                                tensor_info.id
                            ][0].tolist()
                            cur_tensor_infos["zero_point"] = v.tensor_id_to_scale_zp[
                                tensor_info.id
                            ][1].tolist()
                        if (
                            str(tensor_info.id)
                            in v.tensor_id_to_smooth_quant_scaling_factor
                            and v.tensor_id_to_smooth_quant_scaling_factor[
                                str(tensor_info.id)
                            ]
                            is not None
                        ):
                            cur_tensor_infos[
                                "smooth_quant_scaling_factor"
                            ] = v.tensor_id_to_smooth_quant_scaling_factor[
                                str(tensor_info.id)
                            ].tolist()
                            smooth_quant_enabled = True
                    input_tensor_infos.append(cur_tensor_infos)
                info["input_tensor_infos"] = input_tensor_infos
                # weight infos
                weight_tensor_infos = []
                for tensor_info in op_info.weight_tensor_infos:
                    cur_tensor_infos = {}
                    if tensor_info is not None:
                        cur_tensor_infos["orig_dtype"] = str(tensor_info.orig_dtype)
                        cur_tensor_infos["inf_dtype"] = str(tensor_info.inf_dtype)
                        # Weight entries are keyed by "<op idx>_<tensor id>".
                        weight_idx = str(op_info.idx) + "_" + str(tensor_info.id)
                        if weight_idx in v.weight_tensor_id_to_scale_zp:
                            cur_tensor_infos["scale"] = v.weight_tensor_id_to_scale_zp[
                                weight_idx
                            ][0].tolist()
                            cur_tensor_infos[
                                "zero_point"
                            ] = v.weight_tensor_id_to_scale_zp[weight_idx][1].tolist()
                        if (
                            weight_idx
                            in v.weight_tensor_id_to_smooth_quant_scaling_factor
                        ):
                            cur_tensor_infos[
                                "smooth_quant_scaling_factor"
                            ] = v.weight_tensor_id_to_smooth_quant_scaling_factor[
                                weight_idx
                            ].tolist()
                    weight_tensor_infos.append(cur_tensor_infos)
                info["weight_tensor_infos"] = weight_tensor_infos
                # output infos
                output_tensor_infos = []
                for tensor_info in op_info.output_tensor_infos:
                    cur_tensor_infos = {}
                    if tensor_info is not None:
                        cur_tensor_infos["id"] = tensor_info.id
                        cur_tensor_infos["orig_dtype"] = str(tensor_info.orig_dtype)
                        cur_tensor_infos["inf_dtype"] = str(tensor_info.inf_dtype)
                        if tensor_info.id in v.tensor_id_to_scale_zp:
                            cur_tensor_infos["scale"] = v.tensor_id_to_scale_zp[
                                tensor_info.id
                            ][0].tolist()
                            cur_tensor_infos["zero_point"] = v.tensor_id_to_scale_zp[
                                tensor_info.id
                            ][1].tolist()
                        # NOTE(review): unlike the input loop above, this lookup
                        # uses the raw id (not str(id)) as the key — confirm
                        # which key type the scaling-factor map actually uses.
                        if tensor_info.id in v.tensor_id_to_smooth_quant_scaling_factor:
                            cur_tensor_infos[
                                "smooth_quant_scaling_factor"
                            ] = v.tensor_id_to_smooth_quant_scaling_factor[
                                tensor_info.id
                            ].tolist()
                    output_tensor_infos.append(cur_tensor_infos)
                info["output_tensor_infos"] = output_tensor_infos
                # qconfig: serialize the observer settings; SmoothQuant
                # observers additionally record their sub-observers.
                info["activation_observer"] = _get_observer_setting(
                    op_info.qconfig.activation()
                )
                if isinstance(
                    op_info.qconfig.activation(), SmoothQuantActivationObserver
                ):
                    info["activation_observer"][
                        "smooth_quant_enabled"
                    ] = smooth_quant_enabled
                    info["activation_observer"]["act_observer"] = _get_observer_setting(
                        op_info.qconfig.activation().act_obs
                    )
                    info["activation_observer"][
                        "act_ic_observer"
                    ] = _get_observer_setting(op_info.qconfig.activation().ic_obs)
                info["weight_observer"] = _get_observer_setting(
                    op_info.qconfig.weight()
                )
                if isinstance(op_info.qconfig.weight(), SmoothQuantWeightObserver):
                    info["weight_observer"][
                        "smooth_quant_enabled"
                    ] = smooth_quant_enabled
                    info["weight_observer"]["wei_observer"] = _get_observer_setting(
                        op_info.qconfig.weight().oc_obs
                    )
                    info["weight_observer"]["wei_ic_observer"] = _get_observer_setting(
                        op_info.qconfig.weight().ic_obs
                    )
                q_op_infos[q_k] = info
            layer_infos["q_op_infos"] = q_op_infos
        if len(v.seen_nonq_op_infos) == 0:
            layer_infos["nonq_op_infos"] = {}
        else:
            # Non-quantizeable ops only carry type/fqn and tensor dtypes.
            nonq_op_infos = OrderedDict()
            for non_q_k, op_info in enumerate(v.seen_nonq_op_infos):
                info = OrderedDict()
                info["op_type"] = op_info.type
                info["fqn"] = op_info.fqn
                input_tensor_infos = []
                for tensor_info in op_info.input_tensor_infos:
                    cur_tensor_infos = {}
                    if tensor_info is not None:
                        cur_tensor_infos["id"] = tensor_info.id
                        cur_tensor_infos["orig_dtype"] = str(tensor_info.orig_dtype)
                        cur_tensor_infos["inf_dtype"] = str(tensor_info.inf_dtype)
                    input_tensor_infos.append(cur_tensor_infos)
                info["input_tensor_infos"] = input_tensor_infos
                output_tensor_infos = []
                for tensor_info in op_info.output_tensor_infos:
                    cur_tensor_infos = {}
                    if tensor_info is not None:
                        cur_tensor_infos["id"] = tensor_info.id
                        cur_tensor_infos["orig_dtype"] = str(tensor_info.orig_dtype)
                        cur_tensor_infos["inf_dtype"] = str(tensor_info.inf_dtype)
                    output_tensor_infos.append(cur_tensor_infos)
                info["output_tensor_infos"] = output_tensor_infos
                nonq_op_infos[non_q_k] = info
            layer_infos["nonq_op_infos"] = nonq_op_infos
        # per-layer output infos
        layer_output_infos = []
        for tensor_info in v.output_qtensor_infos:
            cur_tensor_infos = {}
            if tensor_info is not None:
                cur_tensor_infos["id"] = tensor_info.id
                cur_tensor_infos["orig_dtype"] = str(tensor_info.orig_dtype)
                cur_tensor_infos["inf_dtype"] = str(tensor_info.inf_dtype)
                if tensor_info.id in v.tensor_id_to_scale_zp:
                    cur_tensor_infos["scale"] = v.tensor_id_to_scale_zp[tensor_info.id][
                        0
                    ].tolist()
                    cur_tensor_infos["zero_point"] = v.tensor_id_to_scale_zp[
                        tensor_info.id
                    ][1].tolist()
            layer_output_infos.append(cur_tensor_infos)
        layer_infos["layer_output_infos"] = layer_output_infos
        quant_state_dict[k] = layer_infos
    # save qparams as a json file
    if configure_file is not None:
        with open(configure_file, "w") as fp:
            json.dump(quant_state_dict, fp, indent=4)
def load_qconf_summary_to_model(model, qconf_summary):
    """
    Load a user-provided quantization configuration (the JSON summary written
    by ``save_qconf_summary``) into ``model``, overwriting the recorded
    quantization state: per-op tensor infos, forced inference dtypes,
    scales/zero points, SmoothQuant scaling factors and per-op qconfigs.

    After the per-layer state is rewritten, observers are re-inserted
    according to the loaded settings and the output-quantization flags are
    refreshed to avoid redundant fake quants.

    Args:
        model: a prepared model carrying ``_fqn_to_auto_quant_state_map``.
        qconf_summary: path to the JSON summary file.

    Raises:
        AssertionError: if an op recorded in the summary doesn't match the
            corresponding op recorded in the current model.
    """
    with open(qconf_summary, "r") as f:
        quant_state_dict = json.load(f)
    quant_state_map = model._fqn_to_auto_quant_state_map
    for k, v in quant_state_map.items():
        layer_info = quant_state_dict[k]
        user_q_op_infos = layer_info["q_op_infos"]
        for i, q_op_info in user_q_op_infos.items():
            fqn = q_op_info["fqn"]
            # BUG FIX: validate the op index before subscripting the recorded
            # state; previously the lookup ran first, so a stale summary
            # raised KeyError instead of the intended assertion message.
            assert (
                int(i) in v.idx_to_seen_q_op_infos
            ), "Loaded quantizable op info doesn't match the current model quantizable op info"
            cur_fqn = v.idx_to_seen_q_op_infos[int(i)].fqn
            assert (
                cur_fqn == fqn
            ), "Loaded quantizable op info doesn't match the current model quantizable op info"
            input_tensor_infos = []
            input_force_dtype_infos = []
            for tensor_info in q_op_info["input_tensor_infos"]:
                # An empty dict marks a missing (None) tensor slot.
                if len(tensor_info) > 0:
                    input_tensor_infos.append(
                        QTensorInfo(
                            tensor_info["id"],
                            dtype_dict[tensor_info["orig_dtype"]],
                            dtype_dict[tensor_info["inf_dtype"]],
                        )
                    )
                    input_force_dtype_infos.append(
                        dtype_dict[tensor_info["force_dtype"]]
                    )
                    if "scale" in tensor_info:
                        scale = torch.FloatTensor(tensor_info["scale"])
                        zp = torch.LongTensor(tensor_info["zero_point"])
                        v.tensor_id_to_scale_zp[tensor_info["id"]] = (scale, zp)
                    if "smooth_quant_scaling_factor" in tensor_info:
                        scaling_factor = torch.FloatTensor(
                            tensor_info["smooth_quant_scaling_factor"]
                        )
                        v.tensor_id_to_smooth_quant_scaling_factor[
                            str(tensor_info["id"])
                        ] = scaling_factor
                else:
                    input_tensor_infos.append(None)
                    input_force_dtype_infos.append(None)
            weight_tensor_infos = []
            weight_idx = 0
            for tensor_info in q_op_info["weight_tensor_infos"]:
                if len(tensor_info) > 0:
                    weight_tensor_infos.append(
                        QTensorInfo(
                            weight_idx,
                            dtype_dict[tensor_info["orig_dtype"]],
                            dtype_dict[tensor_info["inf_dtype"]],
                        )
                    )
                    # Weight scale/zp and SmoothQuant factors are keyed by
                    # "<op idx>_<weight idx>".
                    if "scale" in tensor_info:
                        scale = torch.FloatTensor(tensor_info["scale"])
                        zp = torch.LongTensor(tensor_info["zero_point"])
                        v.weight_tensor_id_to_scale_zp[
                            str(i) + "_" + str(weight_idx)
                        ] = (scale, zp)
                    if "smooth_quant_scaling_factor" in tensor_info:
                        scaling_factor = torch.FloatTensor(
                            tensor_info["smooth_quant_scaling_factor"]
                        )
                        v.weight_tensor_id_to_smooth_quant_scaling_factor[
                            str(i) + "_" + str(weight_idx)
                        ] = scaling_factor
                    weight_idx += 1
                else:
                    weight_tensor_infos.append(None)
            output_tensor_infos = []
            insert_fake_quant_after_outputs = []
            for tensor_info in q_op_info["output_tensor_infos"]:
                if len(tensor_info) > 0:
                    output_tensor_infos.append(
                        QTensorInfo(
                            tensor_info["id"],
                            dtype_dict[tensor_info["orig_dtype"]],
                            dtype_dict[tensor_info["inf_dtype"]],
                        )
                    )
                    # Default to no extra fake quant here; refreshed globally
                    # by set_node_output_quantized below.
                    insert_fake_quant_after_outputs.append(False)
                    if "scale" in tensor_info:
                        scale = torch.FloatTensor(tensor_info["scale"])
                        zp = torch.LongTensor(tensor_info["zero_point"])
                        v.tensor_id_to_scale_zp[tensor_info["id"]] = (scale, zp)
                else:
                    output_tensor_infos.append(None)
            activation_observer = q_op_info["activation_observer"]
            weight_observer = q_op_info["weight_observer"]
            qconfig = QConfig(
                activation=_create_observer(activation_observer),
                weight=_create_observer(weight_observer),
            )
            # override the cur model's info
            v.idx_to_seen_q_op_infos[int(i)].input_tensor_infos = input_tensor_infos
            v.idx_to_seen_q_op_infos[
                int(i)
            ].input_tensor_force_inf_dtype = input_force_dtype_infos
            v.idx_to_seen_q_op_infos[int(i)].output_tensor_infos = output_tensor_infos
            v.idx_to_seen_q_op_infos[
                int(i)
            ].insert_fake_quant_after_outputs = insert_fake_quant_after_outputs
            v.idx_to_seen_q_op_infos[int(i)].weight_tensor_infos = weight_tensor_infos
            v.idx_to_seen_q_op_infos[int(i)].qconfig = qconfig
        user_nonq_op_infos = layer_info["nonq_op_infos"]
        # v.seen_nonq_op_infos.clear()
        idx = 0
        for _, nonq_op_info in user_nonq_op_infos.items():
            fqn = nonq_op_info["fqn"]
            cur_fqn = v.seen_nonq_op_infos[idx].fqn
            assert (
                cur_fqn == fqn
            ), "Loaded none-quantizable op info doesn't match the current model none-quantizable op info"
            input_tensor_infos = []
            for tensor_info in nonq_op_info["input_tensor_infos"]:
                if len(tensor_info) > 0:
                    input_tensor_infos.append(
                        QTensorInfo(
                            tensor_info["id"],
                            dtype_dict[tensor_info["orig_dtype"]],
                            dtype_dict[tensor_info["inf_dtype"]],
                        )
                    )
                else:
                    input_tensor_infos.append(None)
            output_tensor_infos = []
            for tensor_info in nonq_op_info["output_tensor_infos"]:
                if len(tensor_info) > 0:
                    output_tensor_infos.append(
                        QTensorInfo(
                            tensor_info["id"],
                            dtype_dict[tensor_info["orig_dtype"]],
                            dtype_dict[tensor_info["inf_dtype"]],
                        )
                    )
                else:
                    output_tensor_infos.append(None)
            v.seen_nonq_op_infos[idx].input_tensor_infos = input_tensor_infos
            v.seen_nonq_op_infos[idx].output_tensor_infos = output_tensor_infos
            idx += 1
        layer_output_info = []
        for tensor_info in layer_info["layer_output_infos"]:
            if len(tensor_info) > 0:
                layer_output_info.append(
                    QTensorInfo(
                        tensor_info["id"],
                        dtype_dict[tensor_info["orig_dtype"]],
                        dtype_dict[tensor_info["inf_dtype"]],
                    )
                )
                if "scale" in tensor_info:
                    scale = torch.FloatTensor(tensor_info["scale"])
                    zp = torch.LongTensor(tensor_info["zero_point"])
                    v.tensor_id_to_scale_zp[tensor_info["id"]] = (scale, zp)
            else:
                layer_output_info.append(None)
        v.output_qtensor_infos = layer_output_info
    # insert observer according to user's setting.
    for _, v in model.named_modules():
        if hasattr(v, "_auto_quant_state"):
            v._auto_quant_state.tensor_id_to_observer.clear()
            v._auto_quant_state.weight_tensor_id_to_observer.clear()
            v._auto_quant_state.insert_observers(v)
    # update insert_fake_quant_after_output after load user setting, which for avoiding redundant fake quant.
    nodes = convert_quant_state_map_to_nodes(quant_state_map)
    set_node_output_quantized(nodes)
def _lstm_forward(module, input, hx, weights):
    r"""
    LSTM forward function.

    Re-implements ``nn.LSTM.forward`` so that the supplied ``weights`` list
    (e.g. fake-quantized copies) is used instead of the module's own
    parameters; the module only contributes its configuration
    (``num_layers``, ``bidirectional``, ``batch_first``, ...).

    Args:
        module: a ``torch.nn.LSTM`` instance.
        input: a ``Tensor`` or a ``torch.nn.utils.rnn.PackedSequence``.
        hx: optional ``(h_0, c_0)`` tuple; zero states are created if ``None``.
        weights: flat list of weight/bias tensors forwarded to ``_VF.lstm``.

    Returns:
        ``(output, (h_n, c_n))``; the output is a ``PackedSequence`` iff the
        input was packed, and is squeezed back when the input was unbatched.

    Raises:
        RuntimeError: if ``hx``/``cx`` dimensionality does not match the
            (un)batched input.
    """
    orig_input = input
    # xxx: isinstance check needs to be in conditional for TorchScript to compile
    # batch_sizes = None
    if isinstance(orig_input, torch.nn.utils.rnn.PackedSequence):
        input, batch_sizes, sorted_indices, unsorted_indices = input
        max_batch_size = batch_sizes[0]
        max_batch_size = int(max_batch_size)
    else:
        batch_sizes = None
        is_batched = input.dim() == 3
        batch_dim = 0 if module.batch_first else 1
        # Unbatched (2-D) input gets a singleton batch dimension inserted.
        if not is_batched:
            input = input.unsqueeze(batch_dim)
        max_batch_size = input.size(0) if module.batch_first else input.size(1)
        sorted_indices = None
        unsorted_indices = None
    if hx is None:
        # No initial state given: build zero h/c states; with projection
        # enabled the hidden state uses proj_size instead of hidden_size.
        num_directions = 2 if module.bidirectional else 1
        real_hidden_size = (
            module.proj_size if module.proj_size > 0 else module.hidden_size
        )
        h_zeros = torch.zeros(
            module.num_layers * num_directions,
            max_batch_size,
            real_hidden_size,
            dtype=input.dtype,
            device=input.device,
        )
        c_zeros = torch.zeros(
            module.num_layers * num_directions,
            max_batch_size,
            module.hidden_size,
            dtype=input.dtype,
            device=input.device,
        )
        hx = (h_zeros, c_zeros)
    else:
        if batch_sizes is None:  # If not PackedSequence input.
            if is_batched:
                if hx[0].dim() != 3 or hx[1].dim() != 3:
                    msg = (
                        "For batched 3-D input, hx and cx should "
                        f"also be 3-D but got ({hx[0].dim()}-D, {hx[1].dim()}-D) tensors"
                    )
                    raise RuntimeError(msg)
            else:
                if hx[0].dim() != 2 or hx[1].dim() != 2:
                    msg = (
                        "For unbatched 2-D input, hx and cx should "
                        f"also be 2-D but got ({hx[0].dim()}-D, {hx[1].dim()}-D) tensors"
                    )
                    raise RuntimeError(msg)
                hx = (hx[0].unsqueeze(1), hx[1].unsqueeze(1))
        # Each batch of the hidden state should match the input sequence that
        # the user believes he/she is passing in.
        hx = module.permute_hidden(hx, sorted_indices)
    module.check_forward_args(input, hx, batch_sizes)
    # Packed input uses the batch_sizes overload of _VF.lstm.
    if batch_sizes is None:
        result = _VF.lstm(
            input,
            hx,
            weights,
            module.bias,
            module.num_layers,
            module.dropout,
            module.training,
            module.bidirectional,
            module.batch_first,
        )
    else:
        result = _VF.lstm(
            input,
            batch_sizes,
            hx,
            weights,
            module.bias,
            module.num_layers,
            module.dropout,
            module.training,
            module.bidirectional,
        )
    output = result[0]
    hidden = result[1:]
    # xxx: isinstance check needs to be in conditional for TorchScript to compile
    if isinstance(orig_input, torch.nn.utils.rnn.PackedSequence):
        output_packed = torch.nn.utils.rnn.PackedSequence(
            output, batch_sizes, sorted_indices, unsorted_indices
        )
        return output_packed, module.permute_hidden(hidden, unsorted_indices)
    else:
        if not is_batched:
            # Undo the singleton batch dimension added above.
            output = output.squeeze(batch_dim)
            hidden = (hidden[0].squeeze(1), hidden[1].squeeze(1))
        return output, module.permute_hidden(hidden, unsorted_indices)
def module_call_to_function_call(module, args, weights):
    r"""
    Helper that replaces an nn.Module call with the equivalent functional
    call, re-implementing the module's forward with the given ``weights``.

    Args:
        module: the module whose forward is being emulated (Conv2d/Conv3d,
            Linear, EmbeddingBag, ConvTranspose2d/3d or LSTM).
        args: positional inputs that were passed to the module.
        weights: list whose first element is the (possibly fake-quantized)
            weight tensor to use instead of ``module.weight``; for LSTM the
            full flat weight list.

    Returns:
        The output of the functional equivalent of ``module``.

    Raises:
        ValueError: for ConvTranspose with a non-"zeros" padding mode.
        TypeError: if ``module`` is not one of the supported types.
    """
    if isinstance(module, torch.nn.Conv2d) or isinstance(module, torch.nn.Conv3d):
        output = module._conv_forward(args[0], weights[0], module.bias)
    elif isinstance(module, torch.nn.Linear):
        output = F.linear(args[0], weights[0], module.bias)
    elif isinstance(module, torch.nn.EmbeddingBag):
        output = F.embedding_bag(
            args[0],
            weights[0],
            args[1],
            module.max_norm,
            module.norm_type,
            module.scale_grad_by_freq,
            module.mode,
            module.sparse,
            args[2] if len(args) == 3 else None,
            module.include_last_offset,
            module.padding_idx,
        )
    elif isinstance(module, torch.nn.ConvTranspose2d) or isinstance(
        module, torch.nn.ConvTranspose3d
    ):
        if module.padding_mode != "zeros":
            raise ValueError(
                "Only `zeros` padding mode is supported for ConvTranspose2d"
            )
        assert isinstance(module.padding, tuple)
        # One cannot replace List by Tuple or Sequence in "_output_padding" because
        # TorchScript does not support `Sequence[T]` or `Tuple[T, ...]`.
        output_size = args[1] if len(args) == 2 else None
        # NOTE: newer (master) PyTorch adds a `num_spatial_dims` argument:
        #   num_spatial_dims = 2 if isinstance(module, torch.nn.ConvTranspose2d) else 3
        #   output_padding = module._output_padding(args[0], output_size,
        #       module.stride, module.padding, module.kernel_size,
        #       num_spatial_dims, module.dilation)
        output_padding = module._output_padding(
            args[0],
            output_size,
            module.stride,
            module.padding,
            module.kernel_size,
            module.dilation,
        )
        if isinstance(module, torch.nn.ConvTranspose2d):
            output = F.conv_transpose2d(
                args[0],
                weights[0],
                module.bias,
                module.stride,
                module.padding,
                output_padding,
                module.groups,
                module.dilation,
            )
        else:
            output = F.conv_transpose3d(
                args[0],
                weights[0],
                module.bias,
                module.stride,
                module.padding,
                output_padding,
                module.groups,
                module.dilation,
            )
    elif isinstance(module, torch.nn.LSTM):
        output = _lstm_forward(
            module, args[0], args[1] if len(args) == 2 else None, weights
        )
    else:
        # BUG FIX: an unsupported module type previously fell through all
        # branches and failed with UnboundLocalError on `output`; fail with
        # a clear message instead.
        raise TypeError(
            "module_call_to_function_call: unsupported module type "
            + str(type(module))
        )
    return output
def _attach_smooth_quant_scaling_factor_to_model(module):
"""
Get scaling factors for SmoothQuant from observers and
store them in qstate
"""
if not hasattr(module, "_auto_quant_state"):
return
qstate = module._auto_quant_state
qconfig = qstate.qconfig
if not isinstance(
qconfig.activation(), SmoothQuantActivationObserver
) or not isinstance(qconfig.weight(), SmoothQuantWeightObserver):
return
if not qstate.tensor_id_to_observer:
return
for key, obs in qstate.tensor_id_to_observer.items():
if key in qstate.tensor_id_to_smooth_quant_scaling_factor:
continue
scaling_factors = obs.get_scaling_factors()
qstate.tensor_id_to_smooth_quant_scaling_factor[key] = scaling_factors
for key, obs in qstate.weight_tensor_id_to_observer.items():
if key in qstate.weight_tensor_id_to_smooth_quant_scaling_factor:
continue
scaling_factors = obs.get_scaling_factors()
qstate.weight_tensor_id_to_smooth_quant_scaling_factor[key] = scaling_factors
def _map_smooth_quant_info_to_idx(module):
"""
Map dict of {tensor id: smooth quant scaling factor} to
dict of {idx: smooth quant scaling factor}.
For nn.Linear module only.
"""
if not hasattr(module, "_auto_quant_state"):
return
qstate: AutoQuantizationState = module._auto_quant_state # type: ignore[assignment]
qconfig = qstate.qconfig
if not isinstance(
qconfig.activation(), SmoothQuantActivationObserver
) or not isinstance(qconfig.weight(), SmoothQuantWeightObserver):
return
for _, seen_q_op_info in qstate.idx_to_seen_q_op_infos.items():
if not seen_q_op_info.input_tensor_infos:
continue
# Linear has only one activation
for input_arg in seen_q_op_info.input_tensor_infos:
if input_arg is None:
continue
tensor_id = str(input_arg.id)
if tensor_id in qstate.tensor_id_to_smooth_quant_scaling_factor:
key = str(seen_q_op_info.idx)
qstate.idx_to_smooth_quant_scaling_factor[
key
] = qstate.tensor_id_to_smooth_quant_scaling_factor[tensor_id]
# Linear has only one weight. Key is not changed.
for weight_arg in seen_q_op_info.weight_tensor_infos:
if weight_arg is None:
continue
tensor_id = str(seen_q_op_info.idx) + "_" + str(weight_arg.id)
if tensor_id in qstate.weight_tensor_id_to_smooth_quant_scaling_factor:
key = str(seen_q_op_info.idx) + "_" + str(weight_arg.id)
qstate.idx_to_smooth_quant_scaling_factor[
key
] = qstate.weight_tensor_id_to_smooth_quant_scaling_factor[tensor_id]
| 51,792 | 40.236465 | 130 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/quantization/_autotune.py | # This Python file uses the following encoding: utf-8
import sys
import subprocess
import time
def autotune(
    prepared_model,
    calib_dataloader,
    eval_func,
    sampling_sizes=None,
    accuracy_criterion=None,
    tuning_time=0,
):
    r"""
    Automatic accuracy-driven tuning helps users quickly find out the advanced recipe for INT8 inference.

    Args:
        prepared_model (torch.nn.Module): the FP32 prepared model returned from ipex.quantization.prepare.
        calib_dataloader (generator): set a dataloader for calibration.
        eval_func (function): set a evaluation function. This function takes "model" as input parameter
            executes entire evaluation process with self contained metrics,
            and returns an accuracy value which is a scalar number. The higher the better.
        sampling_sizes (list): a list of sample sizes used in calibration, where the tuning algorithm would explore from.
            The default value is ``[100]``.
        accuracy_criterion ({accuracy_criterion_type(str, 'relative' or 'absolute') : accuracy_criterion_value(float)}):
            set the maximum allowed accuracy loss, either relative or absolute. The default value is ``{'relative': 0.01}``.
        tuning_time (seconds): tuning timeout. The default value is ``0`` which means early stop.

    Returns:
        FP32 tuned model (torch.nn.Module)

    Raises:
        RuntimeError: if an incompatible Intel Neural Compressor version is installed.
        AssertionError: if Intel Neural Compressor cannot be imported or installed.
    """
    # Mutable defaults are filled in here instead of in the signature.
    if sampling_sizes is None:
        sampling_sizes = [100]
    if accuracy_criterion is None:
        accuracy_criterion = {"relative": 0.01}
    # The tuning backend is pinned to a specific Intel Neural Compressor release.
    neural_compressor_version = "2.1"
    try:
        import neural_compressor

        if neural_compressor.__version__ != neural_compressor_version:
            # BUG FIX: the old message embedded a long run of indentation via
            # a backslash continuation inside the string literal.
            raise RuntimeError(
                "Please install Intel® Neural Compressor with version {} "
                "while the current version of Intel® Neural Compressor "
                "is {}.".format(
                    neural_compressor_version, neural_compressor.__version__
                )
            )
    except ImportError:
        # Best effort: install the pinned version on the fly, then import it.
        try:
            subprocess.check_call(
                [
                    sys.executable,
                    "-m",
                    "pip",
                    "install",
                    "neural_compressor=={}".format(neural_compressor_version),
                ]
            )
            import neural_compressor
        except BaseException:
            # BUG FIX: previously an AssertionError was constructed but never
            # raised, silently swallowing the install/import failure.
            raise AssertionError(
                "Unable to import neural_compressor from the local environment."
            )
    from neural_compressor import PostTrainingQuantConfig
    from neural_compressor.config import TuningCriterion, AccuracyCriterion
    from neural_compressor import quantization

    conf = PostTrainingQuantConfig(
        backend="ipex",
        calibration_sampling_size=sampling_sizes,
        tuning_criterion=TuningCriterion(timeout=tuning_time),
        accuracy_criterion=AccuracyCriterion(
            criterion=list(accuracy_criterion.keys())[0],
            tolerable_loss=list(accuracy_criterion.values())[0],
        ),
        excluded_precisions=["bf16"],
    )
    q_model = quantization.fit(
        prepared_model, conf, calib_dataloader=calib_dataloader, eval_func=eval_func
    )
    # Persist the best found recipe, then load it back into the prepared model.
    dirname_str = "./saved_tuning_results_" + time.strftime("%Y%m%d_%H%M%S")
    q_model.save(dirname_str)
    prepared_model.load_qconf_summary(
        qconf_summary=dirname_str + "/best_configure.json"
    )
    return prepared_model
| 3,457 | 37.853933 | 124 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/quantization/_quantize_utils.py | import os
import copy
from typing import List, Dict, Tuple, Any, Optional
import torch
from torch.fx.node import map_aggregate
from torch.ao.quantization import PlaceholderObserver
from torch.quantization.qconfig import QConfig
from torch.nn.utils.rnn import PackedSequence
from ._utils import (
get_torch_function_hook_type,
HookType,
get_module_hook_type,
OpQuantizeabilityType,
attach_op_convert_info_to_model,
save_quant_state,
attach_scale_zp_values_to_model,
convert_quant_state_map_to_nodes,
sync_pool_and_lstm_input_output_scale_zp,
module_call_to_function_call,
quantized_modules_has_weights,
load_qconf_summary_to_model,
get_fqn_valid_for_module_dict_key,
check_model_obsever_has_run,
)
from ._quantization_state import (
AutoQuantizationState,
AutoQuantizationStateModuleDict,
init_model_quant_state,
)
from ._recipe import get_default_recipe
from ._module_swap_utils import swap_child_modules
# AutoQuantizationState lives in parent module's _modules.
# Currently, `torch.nn.Sequential`'s forward iterates over all
# items in _modules. To avoid changing the meaning of the program, for
# now we patch the forward to ignore our quantization state.
# Note: this is a hackedy hack, before launching we should consider
# checking the fix into `torch.nn.Sequential` to avoid the patch.
def _nn_sequential_patched_forward(cls, input):
for module in cls:
if not isinstance(module, AutoQuantizationStateModuleDict):
input = module(input)
return input
def _convert_PackedSequence_to_tuple_lstm(args):
if isinstance(args, tuple) and len(args) == 2: # (PackedSequence, hx)
input, batch_sizes, sorted_indices, unsorted_indices = args[0]
args = (input, batch_sizes, sorted_indices, unsorted_indices, args[-1])
elif isinstance(args, tuple) and len(args) == 1: # (PackedSequence, )
input, batch_sizes, sorted_indices, unsorted_indices = args[0]
args = (input, batch_sizes, sorted_indices, unsorted_indices)
else:
AssertionError(
False
), "_convert_PackedSequence_to_tuple args should be a tuple with size 2 or PackedSequence"
return args
def _convert_tuple_to_PackedSequence_lstm(args):
assert (
isinstance(args, tuple) and len(args) >= 4 and len(args) <= 5
), "_convert_tuple_to_PackedSequence input should be a tuple(5=<size >=4)"
if len(args) == 4:
return (PackedSequence(*args),)
else:
return (PackedSequence(*args[:-1]), args[-1])
def auto_prepare(
    model: torch.nn.Module,
    configure: QConfig,
    example_inputs: Optional[Tuple[Any]],
    example_kwarg_inputs: Optional[Dict[Any, Any]],
) -> torch.nn.Module:
    """
    Prepare ``model`` for static quantization via dynamic tracing (DBR).

    Tensors and module calls are wrapped with proxies that record every
    quantizeable op during a first forward pass (run here with the example
    inputs); an ``AutoQuantizationState`` is attached to the relevant modules
    and observers are inserted according to ``configure``. Returns the same
    ``model`` instance with its class patched so later calls keep flowing
    through the recorded quantization state. For dynamic quantization
    (PlaceholderObserver) the tracing step is skipped entirely.
    """

    def convert_to_interception_proxy(x):
        if isinstance(x, torch.Tensor):
            return x.as_subclass(QuantizationPrepareTensorProxy)  # type: ignore[arg-type]
        else:
            return x

    cur_module = None
    first_call = True
    module_stack: List[torch.nn.Module] = []
    # Counter for tensor IDs, will be modified inplace by quant state.
    # This is used to track tensors from output ops to input ops. For example,
    # if op_n had a tensor output with id=1, and op_n+2 had a tensor input with
    # id=1, we know that the output of op_n is the input to op_n+2. Note,
    # this is a list because it needs to incremented inplace.
    qtensor_id = [0]
    module_id_to_fqn: Dict[int, str] = {}

    # Counter for global quantizeable ops, useful for intermediate activation
    # logging.
    global_op_idx = [0]

    global_disable_torch_function_override = False

    def check_add_has_scalar_tensor_input(args):
        r"""
        This function is about check add whether has scalar(tensor) input.
        """
        nonlocal global_disable_torch_function_override
        old_global_disable_torch_function_override = (
            global_disable_torch_function_override
        )
        global_disable_torch_function_override = True
        for arg in args:
            if not isinstance(arg, torch.Tensor) or arg.dim() == 0:
                global_disable_torch_function_override = (
                    old_global_disable_torch_function_override
                )
                return True
        global_disable_torch_function_override = (
            old_global_disable_torch_function_override
        )
        return False

    class QuantizationPrepareTensorProxy(torch.Tensor):
        """
        An override of `torch.Tensor` to enable dynamic tracing for
        quantization.

        For each function with a `__torch_function__` override, this proxy does
        the following for functions which need quantization:

        1. calls `_auto_quant_state.validate_cur_op` to validate that
           the currently seen op is the same as what was recorded during tracing
        2. calls `_auto_quant_state.op_prepare_before_hook`
        3. executes the original function
        4. calls `_auto_quant_state.op_prepare_after_hook`
        5. calls `_auto_quant_state.mark_cur_op_complete` to increment
           the current op index in preparation for the next op

        Otherwise, calls the original function.
        """

        @classmethod
        def __torch_function__(cls, func, types, args=(), kwargs=None):
            nonlocal global_disable_torch_function_override
            if (
                # global override means disable the override here
                global_disable_torch_function_override
                or
                # to prevent printing things from going into an infinite loop
                func == torch.Tensor.__repr__
                or
                # we don't need to override getters in this framework
                func.__name__ == "__get__"
            ):
                return super().__torch_function__(func, types, args, kwargs)

            # if we are in a function, the current module is always a parent
            nonlocal cur_module
            parent_module = cur_module
            nonlocal qtensor_id
            kwargs = kwargs if kwargs else {}
            hook_type = get_torch_function_hook_type(parent_module, func)
            # Don't support torch.add(tensor, scaler)
            # case, scalar+scalar, pytorch trace will convert the first input as a tensor at convert step,
            # but we didn't collect the quant info at calibration step, which can't get
            # quant info here(asster KeyError), so we disable torch.add(tensor, scaler) quantizaiton.
            if (
                hook_type is HookType.OP_HOOKS
                and func in [torch.add, torch.Tensor.add]
                and check_add_has_scalar_tensor_input(args)
            ):
                hook_type = None
            if hook_type is HookType.OP_HOOKS:
                fqn = module_id_to_fqn[id(parent_module)] if parent_module else None
                qstate = parent_module._auto_quant_state  # type: ignore[attr-defined]
                if not first_call:
                    qstate.validate_cur_op(func)
                # run "before" hook
                if first_call:
                    args, kwargs = qstate.first_call_op_prepare_before_hook(
                        func,
                        args,
                        kwargs,
                        qtensor_id,
                        fqn,
                        parent_module,
                        OpQuantizeabilityType.QUANTIZEABLE,
                    )
                else:
                    args, kwargs = qstate.op_prepare_before_hook(func, args, kwargs)
                # forward
                output = super().__torch_function__(func, types, args, kwargs)
                # run "after" hook
                if first_call:
                    output = qstate.first_call_op_prepare_after_hook(
                        func,
                        output,
                        args,
                        qtensor_id,
                        OpQuantizeabilityType.QUANTIZEABLE,
                    )
                else:
                    output = qstate.op_prepare_after_hook(
                        func, output, args, global_op_idx
                    )
                qstate.mark_cur_op_complete(func)
            else:
                # Hook type is not HookType.OP_HOOKS, if first_call is True we
                # record the DAG of non-quantizeable ops.
                if first_call:
                    qstate = getattr(parent_module, "_auto_quant_state", None)
                    if qstate:
                        fqn = (
                            module_id_to_fqn.get(id(parent_module), None)
                            if parent_module
                            else None
                        )
                        args, kwargs = qstate.first_call_op_prepare_before_hook(
                            func,
                            args,
                            kwargs,
                            qtensor_id,
                            fqn,
                            parent_module,
                            OpQuantizeabilityType.NOT_QUANTIZEABLE,
                        )
                output = super().__torch_function__(func, types, args, kwargs)
                if first_call:
                    qstate = getattr(parent_module, "_auto_quant_state", None)
                    if qstate:
                        output = qstate.first_call_op_prepare_after_hook(
                            func,
                            output,
                            args,
                            qtensor_id,
                            OpQuantizeabilityType.NOT_QUANTIZEABLE,
                        )
            if output is NotImplemented:
                with torch._C.DisableTorchFunction():
                    output = func(*args, **kwargs).as_subclass(
                        QuantizationPrepareTensorProxy
                    )
                assert output is not NotImplemented
            return output

        def __repr__(self):
            return f"QuantizationPrepareTensorProxy({super().__repr__()})"

        # TODO(future PR): add other math overrides

    class QuantizationInterceptionModule(type(model)):  # type: ignore[misc]
        """
        An override of user defined subclass of `nn.Module` to enable
        dynamic tracing for quantization.

        `cur_module` keeps track of the current module in the stack.

        During the fist call, an `AutoQuantizationState` object is created and
        attached to each non-leaf modules which we need to check for
        quantizeable operations.

        We override the `__call__` function to do the following for each
        module:

        If the module is an op which needs quantization:

        1. calls `_auto_quant_state.validate_cur_op` to validate that
           the currently seen op is the same as what was recorded during tracing
        2. calls parent module's `._auto_quant_state.op_prepare_before_hook`
        3. executes the original module forward
        4. calls parent module's `_auto_quant_state.op_prepare_after_hook`
        5. calls `_auto_quant_state.mark_cur_op_complete` to increment
           the current op index in preparation for the next op

        Otherwise, calls the original module forward.
        """

        def __call__(self, *args, **kwargs):
            new_args = map_aggregate(args, convert_to_interception_proxy)
            new_kwargs = map_aggregate(kwargs, convert_to_interception_proxy)
            orig_module_call = torch.nn.Module.__call__
            orig_nn_sequential_forward = torch.nn.Sequential.forward

            def _patched_module_call(self, *args, **kwargs):
                nonlocal cur_module
                old_module = cur_module
                cur_module = self
                try:
                    parent_module = module_stack[-1] if len(module_stack) else None
                    module_stack.append(self)
                    fqn = module_id_to_fqn.get(id(self), None)
                    hook_type = get_module_hook_type(parent_module, cur_module)
                    if hook_type is HookType.OP_HOOKS:
                        parent_qstate: AutoQuantizationState = (
                            parent_module._auto_quant_state
                        )  # type: ignore[union-attr, assignment]
                        # before hooks
                        if not first_call:
                            parent_qstate.validate_cur_op(cur_module)

                        # If we are in this hook, `cur_module` is a leaf module.
                        # Therefore, we do not need to override any of its
                        # children. Disabling the overrides for performance.
                        nonlocal global_disable_torch_function_override
                        old_global_disable_torch_function_override = (
                            global_disable_torch_function_override
                        )
                        global_disable_torch_function_override = True

                        is_lstm_packed_input = isinstance(
                            cur_module, torch.nn.LSTM
                        ) and isinstance(args[0], PackedSequence)
                        if is_lstm_packed_input:
                            args = _convert_PackedSequence_to_tuple_lstm(args)
                        if first_call:
                            # mypy ignore is used instead of assert because this
                            # runs on every forward and assert has a performance cost
                            (
                                args,
                                kwargs,
                            ) = parent_qstate.first_call_op_prepare_before_hook(
                                cur_module,
                                args,
                                kwargs,
                                qtensor_id,
                                fqn,
                                cur_module,  # type: ignore[arg-type]
                                OpQuantizeabilityType.QUANTIZEABLE,
                            )
                        else:
                            # mypy ignore is used instead of assert because this
                            # runs on every forward and assert has a performance cost
                            args, kwargs = parent_qstate.op_prepare_before_hook(
                                cur_module, args, kwargs
                            )  # type: ignore[arg-type]
                        if is_lstm_packed_input:
                            args = _convert_tuple_to_PackedSequence_lstm(args)

                        # original forward
                        output = orig_module_call(self, *args, **kwargs)
                        # Re-enable the overrides.
                        global_disable_torch_function_override = (
                            old_global_disable_torch_function_override
                        )

                        # after hooks
                        if is_lstm_packed_input:
                            output = _convert_PackedSequence_to_tuple_lstm(output)
                        if first_call:
                            output = parent_qstate.first_call_op_prepare_after_hook(
                                cur_module,
                                output,
                                args,
                                qtensor_id,
                                OpQuantizeabilityType.QUANTIZEABLE,
                            )
                        else:
                            output = parent_qstate.op_prepare_after_hook(
                                cur_module, output, args, global_op_idx
                            )
                        if is_lstm_packed_input:
                            output = _convert_tuple_to_PackedSequence_lstm(output)
                        parent_qstate.mark_cur_op_complete(cur_module)
                    elif hook_type is HookType.MODULE_IO_HOOKS:
                        cur_qstate = cur_module._auto_quant_state
                        cur_qstate.reset_to_new_call()

                        # original forward
                        output = orig_module_call(self, *args, **kwargs)

                        # after hooks
                        if first_call:
                            output = cur_qstate.first_call_outputs_prepare_hook(
                                output, qtensor_id
                            )
                        else:
                            output = cur_qstate.outputs_prepare_hook(output)
                        cur_qstate.validate_is_at_last_seen_idx()
                    elif hook_type is HookType.ARG_DEQUANTS:
                        if first_call and parent_module is not None:
                            parent_qstate_fc = getattr(
                                parent_module, "_auto_quant_state", None
                            )
                            if parent_qstate_fc:
                                (
                                    args,
                                    kwargs,
                                ) = parent_qstate_fc.first_call_op_prepare_before_hook(
                                    cur_module,
                                    args,
                                    kwargs,
                                    qtensor_id,
                                    fqn,
                                    cur_module,
                                    OpQuantizeabilityType.NOT_QUANTIZEABLE,
                                )
                        output = orig_module_call(self, *args, **kwargs)
                        # if this fp32 was inplace, make sure to set the output dtype
                        # back to torch.float
                        if hasattr(output, "_qtensor_info"):
                            del output._qtensor_info
                        if first_call and parent_module is not None:
                            parent_qstate_fc = getattr(
                                parent_module, "_auto_quant_state", None
                            )
                            if parent_qstate_fc:
                                output = (
                                    parent_qstate_fc.first_call_op_prepare_after_hook(
                                        cur_module,
                                        output,
                                        args,
                                        qtensor_id,
                                        OpQuantizeabilityType.NOT_QUANTIZEABLE,
                                    )
                                )
                    else:
                        output = orig_module_call(self, *args, **kwargs)
                    return output
                finally:
                    module_stack.pop()
                    cur_module = old_module

            torch.nn.Module.__call__ = _patched_module_call
            torch.nn.Sequential.forward = _nn_sequential_patched_forward  # type: ignore[assignment]
            nonlocal first_call
            try:
                if first_call:
                    init_model_quant_state(self, module_id_to_fqn, configure)
                global_op_idx[0] = 0
                output = super().__call__(*new_args, **new_kwargs)
                if first_call:
                    for _, v in self.named_modules():
                        if hasattr(v, "_auto_quant_state"):
                            v._auto_quant_state.insert_observers(v)
                return output
            finally:
                # Always restore the patched globals, even if forward raised.
                torch.nn.Module.__call__ = orig_module_call
                torch.nn.Sequential.forward = orig_nn_sequential_forward  # type: ignore[assignment]
                first_call = False

        def save_qconf_summary(self, qconf_summary):
            r"""
            This function is about save model's quant_state_map to a json file.
            """
            assert (
                qconf_summary is not None
            ), "A configure file name should be given to save the qconf_summary"
            quant_state_map = self._fqn_to_auto_quant_state_map
            # If user have given a json file, we will save the qconf_summary according to the user's setting,
            # otherwise, we will first get a default_recipe, and then save the default_recipe's setting.
            if not hasattr(self, "_qconf_summary"):
                # compute scales and zero_point.
                attach_scale_zp_values_to_model(model)
                nodes = convert_quant_state_map_to_nodes(quant_state_map)
                # pooling and lstm's input and output should have same scale_zp.
                sync_pool_and_lstm_input_output_scale_zp(quant_state_map, nodes)
                get_default_recipe(nodes)
            else:
                if check_model_obsever_has_run(model):
                    # re-compute the scales and zp if user load a json file and re-do the calibration step.
                    attach_scale_zp_values_to_model(model)
                else:
                    # do nothing if user just loaded a json file and not re-do the calibration step
                    pass
            # Setting model qconf_summary attr which can be easily to check the whether the scale/zp has been computed.
            self._qconf_summary = qconf_summary
            save_quant_state(quant_state_map, qconf_summary)

        def load_qconf_summary(self, qconf_summary):
            r"""
            This function is about load the user qconf_summary, which will overwrite the model's quant_state_map.
            """
            if os.path.exists(qconf_summary) and os.stat(qconf_summary).st_size != 0:
                self._qconf_summary = qconf_summary
                load_qconf_summary_to_model(self, qconf_summary)
            else:
                # BUG FIX: previously the AssertionError was constructed but
                # never raised, so a missing or empty file was silently ignored.
                raise AssertionError(
                    "Can not load a empty file or none existed file" + qconf_summary
                )

    model.q_config = configure
    # For Dynamic quantization, most user model has a dynamic control flow, the DBR
    # doesn't support it now, so there skip DRB when user want to run dynamic quantization.
    if not isinstance(configure.activation(), PlaceholderObserver):
        model.__class__ = QuantizationInterceptionModule
        # init model quantization state using example_inputs
        assert example_inputs is not None or example_kwarg_inputs is not None, (
            "IPEX: example_inputs and example_kwarg_inputs cannot be None at same time "
            "for static quantization."
        )
        if example_kwarg_inputs is None:
            model(*example_inputs)
        elif example_inputs is None:
            model(**example_kwarg_inputs)
        else:
            # BUG FIX: previously the AssertionError was constructed but not
            # raised, so passing both kinds of example inputs silently skipped
            # quantization-state initialization.
            raise AssertionError(
                "IPEX quantization.prepare: example_inputs and example_kwarg_inputs cannot be set at same time "
                "for static quantization."
            )
    return model
def copy_prepared_model(model):
    """
    Deep-copy a prepared model, carrying over its quantization artifacts:
    the qconfig, the fqn -> AutoQuantizationState map (re-attached to the
    copied submodules), the optional qconf summary path, and the patched
    class.
    """
    copied_model = copy.deepcopy(model)
    copied_model.q_config = model.q_config
    # Dynamic quantization (PlaceholderObserver) keeps no per-module state.
    if isinstance(copied_model.q_config.activation(), PlaceholderObserver):
        return copied_model
    state_map = copy.deepcopy(model._fqn_to_auto_quant_state_map)
    copied_model._fqn_to_auto_quant_state_map = state_map
    for fqn, submodule in list(copied_model.named_modules()):
        key = fqn_to_use_for_key = get_fqn_valid_for_module_dict_key(fqn)
        if key in state_map:
            # Bypass nn.Module.__setattr__ bookkeeping, as prepare() does.
            object.__setattr__(submodule, "_auto_quant_state", state_map[key])
    if hasattr(model, "_qconf_summary"):
        copied_model._qconf_summary = copy.deepcopy(model._qconf_summary)
    copied_model.__class__ = model.__class__
    return copied_model
def auto_convert(
    module: torch.nn.Module,
) -> torch.nn.Module:
    """
    Convert a calibrated/prepared module into its quantized inference form.

    The same module object is returned with:
      * scales/zero-points attached (computed here unless a qconf summary was
        already saved or loaded),
      * per-op convert info attached and quantized child modules swapped in,
      * its class replaced by a dynamically created ``QuantizationDispatchModule``
        subclass whose ``__call__`` temporarily patches
        ``torch.nn.Module.__call__`` to run per-op convert hooks.
    """

    def convert_to_dispatch_proxy(x):
        # Wrap tensors in the dispatch proxy so __torch_function__ fires;
        # non-tensors pass through unchanged.
        if isinstance(x, torch.Tensor):
            return x.as_subclass(QuantizationConvertTensorProxy)  # type: ignore[arg-type]
        else:
            return x

    # When True, the __torch_function__ override below becomes a no-op.
    global_disable_torch_function_override = False

    def check_add_has_scalar_tensor_input(args):
        r"""
        Return True if any argument of an add op is a scalar or a 0-dim tensor.

        The override is temporarily disabled so that the dim() calls here do
        not re-enter __torch_function__.
        """
        nonlocal global_disable_torch_function_override
        old_global_disable_torch_function_override = (
            global_disable_torch_function_override
        )
        global_disable_torch_function_override = True
        for arg in args:
            if not isinstance(arg, torch.Tensor) or arg.dim() == 0:
                global_disable_torch_function_override = (
                    old_global_disable_torch_function_override
                )
                return True
        global_disable_torch_function_override = (
            old_global_disable_torch_function_override
        )
        return False

    class QuantizationConvertTensorProxy(torch.Tensor):
        """
        An override of `torch.Tensor` to enable dynamic dispatch for
        quantization inference.
        For each function with a `__torch_function__` override, this proxy does
        the following for functions which need quantization:
        1. calls `_auto_quant_state.validate_cur_op` to validate that
           the currently seen op is the same as what was recorded during tracing
        2. calls `_auto_quant_state.op_convert_before_hook`.
        3. executes the function, with target, args and kwargs possibly modified
           by (2)
        4. calls `_auto_quant_state.inference_function_after_hook`.
        5. calls `_auto_quant_state.mark_cur_op_complete` to increment
           the current op index in preparation for the next op
        Otherwise, calls the original function.
        """

        @classmethod
        def __torch_function__(cls, func, types, args=(), kwargs=None):
            nonlocal global_disable_torch_function_override
            if (
                # global override means disable the override here
                global_disable_torch_function_override
                or
                # to prevent printing things from going into an infinite loop
                func == torch.Tensor.__repr__
                or
                # we don't need to override getters in this framework
                func.__name__ == "__get__"
            ):
                return super().__torch_function__(func, types, args, kwargs)
            kwargs = kwargs if kwargs else {}
            # if we are in a function, the current module is always a parent
            parent_module = cur_module
            hook_type = get_torch_function_hook_type(parent_module, func)
            # Don't support the torch.add(tensor, scalar) case.
            # For scalar+scalar, the pytorch trace converts the first input to a
            # tensor, but quant info was not collected at calibration, so a
            # lookup here would raise KeyError; disable quantization for
            # torch.add(tensor, scalar).
            if (
                hook_type is HookType.OP_HOOKS
                and func in [torch.add, torch.Tensor.add]
                and check_add_has_scalar_tensor_input(args)
            ):
                hook_type = None
            if hook_type is HookType.OP_HOOKS:
                qstate: AutoQuantizationState = parent_module._auto_quant_state  # type: ignore[union-attr]
                # before hooks
                qstate.validate_cur_op(func)
                func, args, kwargs = qstate.op_convert_before_hook(
                    func, args, kwargs, parent_module
                )  # type: ignore[arg-type]
                # forward
                output = super().__torch_function__(func, types, args, kwargs)
                # after hooks
                output = qstate.op_convert_after_hook(func, output)
                qstate.mark_cur_op_complete(func)
            else:  # HookType.NONE
                output = super().__torch_function__(func, types, args, kwargs)
            if output is NotImplemented:
                # Retry with the override machinery disabled, re-wrapping the
                # result in the proxy type.
                with torch._C.DisableTorchFunction():
                    output = func(*args, **kwargs).as_subclass(
                        QuantizationConvertTensorProxy
                    )
                assert output is not NotImplemented
            return output

        def __repr__(self):
            return f"QuantizationConvertTensorProxy({super().__repr__()})"

    # Shared tracing state: the module currently executing and the call stack.
    cur_module = None
    module_stack: List[torch.nn.Module] = []
    # The dynamic subclass created below assumes exactly one base class.
    assert len(module.__class__.__bases__) == 1

    class QuantizationDispatchModule(module.__class__.__bases__[0]):  # type: ignore[name-defined]
        """
        An override of user defined subclass of `nn.Module` to enable
        dynamic tracing for quantization, after model conversion
        to quantized domain.
        `cur_module` keeps track of the current module in the stack.
        Tensor arguments are converted to `QuantizationConvertTensorProxy`.
        We override the `__call__` function to do the following for each
        module:
        If the module is an op which needs quantization:
        1. calls `_auto_quant_state.validate_cur_op` to validate that
           the currently seen op is the same as what was recorded during tracing
        2. calls parent module's `._auto_quant_state.op_convert_before_hook`
        3. executes the original module forward
        4. calls parent module's `_auto_quant_state.op_convert_after_hook`
        5. calls `_auto_quant_state.mark_cur_op_complete` to increment
           the current op index in preparation for the next op
        Otherwise, calls the original module forward.
        """

        def __call__(self, *args, **kwargs):
            new_args = map_aggregate(args, convert_to_dispatch_proxy)
            new_kwargs = map_aggregate(kwargs, convert_to_dispatch_proxy)
            orig_module_call = torch.nn.Module.__call__
            orig_nn_sequential_forward = torch.nn.Sequential.forward

            def _patched_module_call(self, *args, **kwargs):
                nonlocal cur_module
                old_module = cur_module
                cur_module = self
                nonlocal global_disable_torch_function_override
                try:
                    parent_module = module_stack[-1] if len(module_stack) else None
                    module_stack.append(self)
                    hook_type = get_module_hook_type(parent_module, cur_module)
                    if hook_type is HookType.OP_HOOKS:
                        # before hooks
                        qstate: AutoQuantizationState = (
                            parent_module._auto_quant_state
                        )  # type: ignore[union-attr, assignment]
                        qstate.validate_cur_op(cur_module)
                        # If we are in this hook, `cur_module` is a leaf module.
                        # Therefore, we do not need to override any of its
                        # children. Disabling the overrides for performance.
                        old_global_disable_torch_function_override = (
                            global_disable_torch_function_override
                        )
                        global_disable_torch_function_override = True
                        # LSTM with a PackedSequence input is round-tripped
                        # through a plain tuple around the convert hooks.
                        is_lstm_packed_input = isinstance(
                            cur_module, torch.nn.LSTM
                        ) and isinstance(args[0], PackedSequence)
                        if is_lstm_packed_input:
                            args = _convert_PackedSequence_to_tuple_lstm(args)
                        _, args, kwargs = qstate.op_convert_before_hook(
                            cur_module, args, kwargs, cur_module
                        )
                        if is_lstm_packed_input:
                            args = _convert_tuple_to_PackedSequence_lstm(args)
                        if type(cur_module) in quantized_modules_has_weights:
                            weights = qstate.op_weight_convert_before_hook(cur_module)
                            output = module_call_to_function_call(self, args, weights)
                        else:
                            output = orig_module_call(self, *args, **kwargs)
                        # after hooks
                        if is_lstm_packed_input:
                            output = _convert_PackedSequence_to_tuple_lstm(output)
                        output = qstate.op_convert_after_hook(cur_module, output)
                        if is_lstm_packed_input:
                            output = _convert_tuple_to_PackedSequence_lstm(output)
                        # Re-enable the override.
                        global_disable_torch_function_override = (
                            old_global_disable_torch_function_override
                        )
                        qstate.mark_cur_op_complete(cur_module)
                    elif hook_type is HookType.MODULE_IO_HOOKS:
                        cur_qstate: AutoQuantizationState = cur_module._auto_quant_state
                        cur_qstate.reset_to_new_call()
                        # before hooks (TODO)
                        # forward
                        output = orig_module_call(self, *args, **kwargs)
                        # after hooks
                        # For the sake of performance, we assume no overrides
                        # are needed for quantizing/dequantizing things
                        old_global_disable_torch_function_override = (
                            global_disable_torch_function_override
                        )
                        global_disable_torch_function_override = True
                        output = cur_qstate.outputs_convert_hook(output)
                        global_disable_torch_function_override = (
                            old_global_disable_torch_function_override
                        )
                        cur_qstate.validate_is_at_last_seen_idx()
                    else:
                        output = orig_module_call(self, *args, **kwargs)
                    return output
                finally:
                    module_stack.pop()
                    cur_module = old_module

            torch.nn.Module.__call__ = _patched_module_call
            torch.nn.Sequential.forward = _nn_sequential_patched_forward  # type: ignore[assignment]
            try:
                output = super().__call__(*new_args, **new_kwargs)

                def unwrap_proxy(a):
                    # Strip the proxy type from output tensors in place.
                    if isinstance(a, QuantizationConvertTensorProxy):
                        a.__class__ = torch.Tensor  # type: ignore[assignment]
                    return a

                output = map_aggregate(output, unwrap_proxy)
                return output
            finally:
                # Always restore the patched entry points, even on error.
                torch.nn.Module.__call__ = orig_module_call
                torch.nn.Sequential.forward = orig_nn_sequential_forward  # type: ignore[assignment]

    # If the module doesn't have a _qconf_summary attr, the user didn't run the
    # save_qconf_summary method (which computes the scales and zp) and didn't
    # load settings from a given json file (load_qconf_summary), so compute the
    # scale and zp here.
    if not hasattr(module, "_qconf_summary"):
        quant_state_map = module._fqn_to_auto_quant_state_map
        # compute scales and zero_point.
        attach_scale_zp_values_to_model(module)
        nodes = convert_quant_state_map_to_nodes(quant_state_map)
        # pooling and lstm's input and output should have same scale_zp.
        sync_pool_and_lstm_input_output_scale_zp(quant_state_map, nodes)
        get_default_recipe(nodes)
    else:
        if check_model_obsever_has_run(module):
            # re-compute the scales and zp if user loaded a json file and re-did
            # the calibration step.
            attach_scale_zp_values_to_model(module)
        else:
            # clear observers if present; this applies when the user's json
            # setting is loaded and the calibration step is not re-done.
            for _, v in module._fqn_to_auto_quant_state_map.items():
                v.tensor_id_to_observer.clear()
                v.weight_tensor_id_to_observer.clear()
    # Attach quant_info to the parent of each module
    attach_op_convert_info_to_model(module)
    swap_child_modules(module)
    module.__class__ = QuantizationDispatchModule
    return module
| 36,581 | 45.541985 | 129 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/quantization/_qconfig.py | import torch
from torch.ao.quantization import (
PlaceholderObserver,
PerChannelMinMaxObserver,
HistogramObserver,
QConfig,
QConfigMapping,
)
from ._smooth_quant import SmoothQuantActivationObserver, SmoothQuantWeightObserver
# Shared per-channel symmetric int8 weight observer used by both default qconfigs.
_default_weight_observer = PerChannelMinMaxObserver.with_args(
    dtype=torch.qint8, qscheme=torch.per_channel_symmetric
)

# Static quantization: histogram-based activation observer + per-channel weights.
default_static_qconfig = QConfig(
    activation=HistogramObserver.with_args(reduce_range=False),
    weight=_default_weight_observer,
)
"""
Default qconfig configuration for static quantization.
"""

default_static_qconfig_mapping = QConfigMapping().set_global(default_static_qconfig)

# Dynamic quantization: activations stay fp32 (PlaceholderObserver with
# is_dynamic=True); only weights are observed.
default_dynamic_qconfig = QConfig(
    activation=PlaceholderObserver.with_args(dtype=torch.float, is_dynamic=True),
    weight=_default_weight_observer,
)
"""
Default qconfig configuration for dynamic quantization.
"""

default_dynamic_qconfig_mapping = QConfigMapping().set_global(default_dynamic_qconfig)
def get_smooth_quant_qconfig_mapping(
    alpha=0.5,
    act_observer=None,
    act_ic_observer=None,
    wei_observer=None,
    wei_ic_observer=None,
):
    """
    Build a QConfigMapping for static quantization of large language models
    (LLM) using SmoothQuant (https://arxiv.org/pdf/2211.10438.pdf).

    Arguments:
        alpha: Hyper-parameter for SmoothQuant.
        act_observer: Observer for activation of ops other than nn.Linear.
            HistogramObserver by default. For nn.Linear with SmoothQuant
            enabled, q-params are calculated from act_ic_observer's and
            wei_ic_observer's min/max, so this argument does not affect them.
        act_ic_observer: Per-input-channel observer for activation; used only
            for nn.Linear with SmoothQuant enabled. PerChannelMinMaxObserver
            by default.
        wei_observer: Observer for weight of all weighted ops. For nn.Linear
            with SmoothQuant enabled it calculates q-params after applying the
            scaling factors. PerChannelMinMaxObserver by default.
        wei_ic_observer: Per-input-channel observer for weight; used only for
            nn.Linear with SmoothQuant enabled. PerChannelMinMaxObserver by
            default.
    """
    activation_observer = SmoothQuantActivationObserver.with_args(
        reduce_range=False,
        alpha=alpha,
        act_observer=act_observer,
        act_ic_observer=act_ic_observer,
    )
    weight_observer = SmoothQuantWeightObserver.with_args(
        dtype=torch.qint8,
        qscheme=torch.per_channel_symmetric,
        alpha=alpha,
        wei_observer=wei_observer,
        wei_ic_observer=wei_ic_observer,
    )
    smooth_quant_qconfig = QConfig(
        activation=activation_observer,
        weight=weight_observer,
    )
    mapping = QConfigMapping()
    mapping.set_global(smooth_quant_qconfig)
    return mapping
# For weight-only quantization
def get_weight_only_quant_qconfig_mapping(weight_dtype: torch.dtype = torch.qint8):
    """
    Build a QConfigMapping for weight-only quantization.

    Activations are kept in fp32 (PlaceholderObserver, non-dynamic); only the
    weight is observed per-channel, with a qscheme matching ``weight_dtype``.
    Raises KeyError for an unsupported ``weight_dtype``.
    """
    supported_qschemes = {
        torch.qint8: torch.per_channel_affine,
        # PyTorch requires per_channel_affine_float_qparams for quint4x2.
        torch.quint4x2: torch.per_channel_affine_float_qparams,
    }
    act_observer = PlaceholderObserver.with_args(dtype=torch.float, is_dynamic=False)
    wei_observer = PerChannelMinMaxObserver.with_args(
        dtype=weight_dtype, qscheme=supported_qschemes[weight_dtype]
    )
    qconfig = QConfig(activation=act_observer, weight=wei_observer)
    return QConfigMapping().set_global(qconfig)
| 3,627 | 37.189474 | 116 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/quantization/_recipe.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from intel_extension_for_pytorch.nn.functional import interaction
from ._utils import ParentNode, set_node_output_quantized
# Op-name lists used by the recipe logic below. Ops are keyed by str() of the
# callable/module class so that both functional and module forms match.
add_inplace_ops = [str(torch.Tensor.add_)]

add_ops = [str(torch.add), str(torch.Tensor.add)]

# Element-wise ops whose output may be quantized.
elt_wise_q_ops = [str(torch.Tensor.relu), str(torch.relu), str(F.relu), str(nn.ReLU)]

# Element-wise ops treated as non-quantized pass-throughs (nn.ReLU also appears
# in elt_wise_q_ops; kept here for the pass-through matching).
elt_wise_noq_ops = [
    str(torch.relu_),
    str(torch.sigmoid_),
    str(nn.ReLU),
    str(torch.Tensor.relu_),
    str(torch.Tensor.sigmoid_),
    str(torch.nn.Hardtanh),
    str(F.hardtanh),
    str(F.hardtanh_),
    str(torch.nn.ELU),
    str(F.elu),
    str(F.elu_),
    str(nn.SiLU),
    str(F.silu),
    str(torch.Tensor.sigmoid),
    str(torch.sigmoid),
    str(F.sigmoid),
    str(nn.Sigmoid),
    str(F.gelu),
    str(nn.GELU),
]

# Convolution and GEMM-like ops (weighted compute ops eligible for fusion).
conv_gemm_ops = [
    str(F.conv2d),
    str(nn.Conv2d),
    str(F.conv3d),
    str(nn.Conv3d),
    str(torch.conv2d),
    str(torch.conv3d),
    str(F.conv_transpose2d),
    str(torch.nn.ConvTranspose2d),
    str(F.conv_transpose3d),
    str(torch.nn.ConvTranspose3d),
    str(torch.conv_transpose2d),
    # BUGFIX: this entry was a duplicated str(torch.conv_transpose2d); the 3d
    # variant was clearly intended (mirrors the F./nn. 2d/3d pairs above).
    str(torch.conv_transpose3d),
    str(F.linear),
    str(nn.Linear),
    str(torch.matmul),
    str(torch.Tensor.matmul),
    str(torch.bmm),
    str(torch.Tensor.bmm),
]

# Convolution-only subset of the above.
conv_ops = [
    str(F.conv2d),
    str(nn.Conv2d),
    str(F.conv3d),
    str(nn.Conv3d),
    str(torch.conv2d),
    str(torch.conv3d),
    str(F.conv_transpose2d),
    str(torch.nn.ConvTranspose2d),
    str(F.conv_transpose3d),
    str(torch.nn.ConvTranspose3d),
    str(torch.conv_transpose2d),
    # BUGFIX: duplicated str(torch.conv_transpose2d) replaced by the intended
    # 3d variant (same typo as in conv_gemm_ops).
    str(torch.conv_transpose3d),
]
rnn_ops = [str(torch.nn.LSTM)]

# Those ops only support the s8->s8 path, and also require that the qscheme is
# per_tensor_symmetric.
s8_s8_symmetric_ops = [
    str(interaction),
    str(torch.ops.torch_ipex.interaction_forward),
    str(torch.embedding_bag),
    str(F.embedding_bag),
    str(torch.nn.EmbeddingBag),
]

# Functional (non-module) conv/gemm forms: for these, input index 1 is the
# weight tensor (see _default_recipe_init).
conv_gemm_fs = [
    str(F.conv2d),
    str(F.conv3d),
    str(F.conv_transpose2d),
    str(F.conv_transpose3d),
    str(torch.conv2d),
    str(torch.conv3d),
    str(torch.conv_transpose2d),
    # BUGFIX: this entry was a duplicated str(torch.conv_transpose2d); the 3d
    # variant was clearly intended (mirrors the F. 2d/3d pairs above).
    str(torch.conv_transpose3d),
    str(F.linear),
    str(torch._C._nn.linear),
]
def _default_recipe_init(nodes):
    r"""
    Initialize the default recipe: set each quantizable op's inference dtype
    (``inf_dtype``) to qint8/quint8 according to its qconfig.

    Special cases: some ops (interaction, EmbeddingBag) only support a specific
    quantization path (s8->s8, per_tensor_symmetric), so when the qconfig does
    not meet those requirements their inference dtype is left at the original
    dtype. LSTM with what looks like a PackedSequence input (int64 second
    input) is also excluded.
    """
    for node in nodes:
        if isinstance(node, ParentNode):
            continue
        if node.qconfig is not None:
            # Add q+dq before the quantizable op firstly.
            for idx, tensor_info in enumerate(node.input_tensor_infos):
                # only support fp32 tensor->int8 tensor
                if (
                    tensor_info is not None
                    and (tensor_info.orig_dtype == torch.float32)
                    and tensor_info.id in node.input_scale_zero
                ):
                    # For functional conv/gemm forms, input index 1 is the
                    # weight, which uses the weight observer's dtype.
                    if node.type in conv_gemm_fs and idx == 1:
                        tensor_info.inf_dtype = node.qconfig.weight().dtype
                    else:
                        tensor_info.inf_dtype = node.qconfig.activation().dtype
                    node.input_tensor_force_inf_dtype[idx] = tensor_info.inf_dtype
            # For EmbeddingBag and interaction, check the qconfig's settings; if
            # the requirements are not met, leave the weight's inf dtype unset.
            for tensor_info in node.weight_tensor_infos:
                # nn.EmbeddingBag uses the activation observer and only supports
                # torch.qint8 with torch.per_tensor_symmetric.
                if (
                    tensor_info is not None
                    and (tensor_info.orig_dtype == torch.float32)
                    and (
                        str(node.idx) + "_" + str(tensor_info.id)
                        in node.weight_scale_zero
                    )
                ):
                    if (
                        node.type == str(torch.nn.EmbeddingBag)
                        and node.qconfig.activation().dtype == torch.qint8
                        and node.qconfig.activation().qscheme
                        == torch.per_tensor_symmetric
                    ) or node.type != str(torch.nn.EmbeddingBag):
                        tensor_info.inf_dtype = node.qconfig.weight().dtype
            # interaction only supports qint8 and torch.per_tensor_symmetric; if
            # the requirement is not met, reset the inputs' inf dtype.
            if node.type in s8_s8_symmetric_ops:
                if not (
                    node.qconfig.activation().dtype == torch.qint8
                    and node.qconfig.activation().qscheme == torch.per_tensor_symmetric
                ):
                    for idx, tensor_info in enumerate(node.input_tensor_infos):
                        if tensor_info is not None:
                            tensor_info.inf_dtype = tensor_info.orig_dtype
                            node.input_tensor_force_inf_dtype[
                                idx
                            ] = tensor_info.inf_dtype
            # For LSTM, if its input is a PackedSequence, we don't support it now.
            # TODO: support PackedSequence input for quantization LSTM.
            if (
                node.type in rnn_ops
                and len(node.input_tensor_infos) > 2
                and node.input_tensor_infos[1].orig_dtype == torch.int64
            ):
                for idx, tensor_info in enumerate(node.input_tensor_infos):
                    if tensor_info is not None:
                        tensor_info.inf_dtype = tensor_info.orig_dtype
                        node.input_tensor_force_inf_dtype[idx] = tensor_info.inf_dtype
                for idx, tensor_info in enumerate(node.weight_tensor_infos):
                    if tensor_info is not None:
                        tensor_info.inf_dtype = tensor_info.orig_dtype
# TODO: making fusion pattern check more general.
def _find_fused_node_with_cur_elt_wise(node, ops):
    r"""
    Locate a producer of the current elt-wise node whose type is in ``ops`` and
    that can be fused with it; return that producer node or None.

    Pass-through producers (nn.Identity and elt-wise ops) with a single
    consumer are looked through recursively.
    """
    if not node.pre_nodes:
        return None
    candidate = node.pre_nodes[0]
    if candidate is None:
        return None
    if candidate.type in ops:
        # Fusable when the producer feeds only this node, or when this node is
        # part of a conv/gemm+sigmoid+mul (swish) pattern downstream.
        if len(candidate.post_nodes) == 1:
            return candidate
        if len(node.post_nodes) == 1 and _find_conv_or_gemm_swish_fusion_node(
            node.post_nodes[0]
        ):
            # conv+sigmoid+mul
            return candidate
        return None
    pass_through_types = [str(nn.Identity)] + elt_wise_q_ops + elt_wise_noq_ops
    if (
        candidate.type in pass_through_types
        and len(candidate.post_nodes) == 1
        and len(candidate.pre_nodes) > 0
    ):
        return _find_fused_node_with_cur_elt_wise(candidate.pre_nodes[0], ops)
    return None
def _find_fused_node_with_cur_add(node, ops):
    r"""
    Find a producer of the current add node whose type is in ``ops`` and that
    can be fused with the add; return that node (or the Identity standing in
    front of it) or None.

    Both producers of the add are checked (index 0 first, then index 1 when
    present). An nn.Identity producer with a single consumer is looked through
    recursively, but when a fused node is found behind it, the Identity node
    itself is returned rather than the fused producer.
    """
    if len(node.pre_nodes) == 0:
        return None
    if len(node.pre_nodes) > 0:
        if (
            node.pre_nodes[0].type in ops
            and len(node.pre_nodes[0].post_nodes) == 1
            and node.pre_nodes[0].qconfig is not None
        ):
            return node.pre_nodes[0]
        elif (
            node.pre_nodes[0].type == str(nn.Identity)
            and len(node.pre_nodes[0].post_nodes) == 1
            and len(node.pre_nodes[0].pre_nodes) > 0
        ):
            fused_node = _find_fused_node_with_cur_add(node.pre_nodes[0], ops)
            if fused_node is not None:
                return node.pre_nodes[0]
            else:
                # NOTE(review): an Identity producer whose chain yields no
                # fusable node returns None here without ever checking the
                # add's second input below — confirm this is intended.
                return None
    # Falls through here only when pre_nodes[0] matched neither branch above.
    if len(node.pre_nodes) == 2:
        if (
            node.pre_nodes[1].type in ops
            and len(node.pre_nodes[1].post_nodes) == 1
            and node.pre_nodes[1].qconfig is not None
        ):
            return node.pre_nodes[1]
        elif (
            node.pre_nodes[1].type == str(nn.Identity)
            and len(node.pre_nodes[1].post_nodes) == 1
            and len(node.pre_nodes[1].pre_nodes) > 0
        ):
            fused_node = _find_fused_node_with_cur_add(node.pre_nodes[1], ops)
            if fused_node is not None:
                return node.pre_nodes[1]
            else:
                return None
    return None
def _find_conv_or_gemm_swish_fusion_node(node):
    r"""
    Check whether the given mul node completes a conv/gemm_sigmoid_mul (swish)
    fusion; return the conv/gemm node if so, else None.
          conv/gemm
           /    \
          /   sigmoid
          \    /
          mul(_)
    The conv/gemm must feed exactly the mul and the sigmoid (2 consumers), and
    the sigmoid must feed only the mul. Both operand orders of mul are handled.
    """
    mul_ops = [str(torch.mul), str(torch.Tensor.mul), str(torch.Tensor.mul_)]
    sigmoid_ops = [
        str(torch.Tensor.sigmoid),
        str(torch.Tensor.sigmoid_),
        str(torch.sigmoid),
        str(torch.sigmoid_),
        str(F.sigmoid),
        str(torch.nn.Sigmoid),
    ]
    if node.type in mul_ops and len(node.pre_nodes) == 2:
        if (
            node.pre_nodes[0].type in conv_gemm_ops
            and node.pre_nodes[1].type in sigmoid_ops
        ):
            if (
                len(node.pre_nodes[0].post_nodes) == 2
                and len(node.pre_nodes[1].post_nodes) == 1
                and node.pre_nodes[1] in node.pre_nodes[0].post_nodes
            ):
                return node.pre_nodes[0]
        elif (
            node.pre_nodes[1].type in conv_gemm_ops
            and node.pre_nodes[0].type in sigmoid_ops
        ):
            # BUGFIX: this branch referenced the non-existent attribute
            # `node.pre_node` (AttributeError at runtime); use `pre_nodes`
            # exactly as the symmetric branch above does.
            if (
                len(node.pre_nodes[1].post_nodes) == 2
                and len(node.pre_nodes[0].post_nodes) == 1
                and node.pre_nodes[0] in node.pre_nodes[1].post_nodes
            ):
                return node.pre_nodes[1]
    return None
def _check_has_quantizable_node_before_node(node):
    r"""
    Check whether there is a quantizable node at (or feeding) the given node.

    Used to decide whether to insert a fake quant before a quantizable node:
    for ``given_node -> quantizable_node``, when the given node is
    non-quantizable (and not part of a supported fusion group) the fake quant
    can be omitted. nn.Identity nodes are looked through recursively.
    """
    if node.type == str(nn.Identity):
        if len(node.pre_nodes) > 0:
            return _check_has_quantizable_node_before_node(node.pre_nodes[0])
        else:
            return False
    else:
        # check whether it has a qconfig
        if node.qconfig is None:
            if len(node.pre_nodes) == 0:
                return False
            # conv/gemm+add(_)+elt_wise
            if node.type in elt_wise_noq_ops:
                fused_elt_wise_node = _find_fused_node_with_cur_elt_wise(
                    node, conv_gemm_ops + add_ops + add_inplace_ops
                )
                if fused_elt_wise_node is not None:
                    # if fused_elt_wise_node is an in-place add, make sure it
                    # can also be fused with conv/gemm.
                    if fused_elt_wise_node.type in add_inplace_ops:
                        fused_add_node = _find_fused_node_with_cur_add(
                            node, conv_gemm_ops
                        )
                        if (
                            fused_add_node is not None
                            and fused_add_node.qconfig is not None
                        ):
                            return True
                        else:
                            return False
                    else:
                        if fused_elt_wise_node.qconfig is not None:
                            return True
                        else:
                            return False
            elif node.type in add_inplace_ops:  # check gemm+add_
                fused_add_wise_node = _find_fused_node_with_cur_add(node, conv_gemm_ops)
                if (
                    fused_add_wise_node is not None
                    and fused_add_wise_node.qconfig is not None
                ):
                    return True
            # conv+sigmoid+mul(_)
            fused_conv_or_gemm_swish_node = _find_conv_or_gemm_swish_fusion_node(node)
            if (
                fused_conv_or_gemm_swish_node is not None
                and fused_conv_or_gemm_swish_node.qconfig is not None
            ):
                return True
            return False
        else:
            if node.type in s8_s8_symmetric_ops:
                if node.type in [
                    str(interaction),
                    str(torch.ops.torch_ipex.interaction_forward),
                ]:
                    for force_inf_dtype in node.input_tensor_force_inf_dtype:
                        # BUGFIX: entries of input_tensor_force_inf_dtype are
                        # plain torch.dtype values (see _default_recipe_init and
                        # get_default_recipe), not tensor-info objects; reading
                        # `.inf_dtype` off them raised AttributeError. Compare
                        # the dtype directly.
                        if force_inf_dtype == torch.qint8:
                            return True
                    return False
                else:
                    # EmbeddingBag: quantizable iff its weight is int8.
                    if node.weight_tensor_infos[0].inf_dtype == torch.qint8:
                        return True
                    else:
                        return False
            else:
                # For a non-ipex-customized op, having a qconfig means it is a
                # quantizable op.
                return True
def _check_has_quantizable_node_after_node(node):
    r"""
    Return True only when the node has at least one consumer and every
    consumer carries a qconfig (i.e. all downstream nodes are quantizable).
    Used to decide whether inserting a fake quant before a node is worthwhile.
    """
    consumers = node.post_nodes
    if not consumers:
        return False
    return all(consumer.qconfig is not None for consumer in consumers)
def _add_recipe(node):
    r"""
    Decide where fake quants go around an add node.

    Case1: add has a pre gemm node.
    Given     gemm     op           gemm      op               gemm    op
               \      /              \       /                   \     /
                \    /      ==>   fake_quant (fake_quant?) ==>    \ (fake_quant?)
                 \  /                  \    /                      \   /
                 add                    add                         add

             gemm  fp32_op             gemm  quantizable_op
    ==>        \     /                   \      /
                \   /          or         \  fake_quant
                 \ /                       \   /
                 add                        add
    Case2: add doesn't have a pre conv/gemm node.
    For this case, if one add input comes from a non-quantizable op, no fake
    quant is inserted before it.
    """

    def reset_input_inf_dtype_to_orig_dtype(node, input_idx):
        # Resets input `input_idx` back to its original dtype (i.e. removes the
        # fake quant) when its producer chain has no quantizable node.
        if node.input_tensor_infos[input_idx] is not None:
            if (
                node.input_tensor_infos[input_idx]
                in node.pre_nodes[0].output_tensor_infos
            ):
                # NOTE(review): membership was tested against pre_nodes[0], but
                # the producer picked here is pre_nodes[input_idx] — looks
                # suspicious when input_idx == 1; confirm this is intended.
                pre_node = node.pre_nodes[input_idx]
            elif (
                len(node.pre_nodes) == 2
                and node.input_tensor_infos[input_idx]
                in node.pre_nodes[1].output_tensor_infos
            ):
                pre_node = node.pre_nodes[1]
            else:
                pre_node = None
            if pre_node is not None:
                add_quantize_add_input_idx = _check_has_quantizable_node_before_node(
                    pre_node
                )
            else:
                add_quantize_add_input_idx = False
            if not add_quantize_add_input_idx:
                node.input_tensor_infos[input_idx].inf_dtype = node.input_tensor_infos[
                    input_idx
                ].orig_dtype
                node.input_tensor_force_inf_dtype[input_idx] = node.input_tensor_infos[
                    input_idx
                ].inf_dtype

    conv_gemm_node = _find_fused_node_with_cur_add(node, conv_gemm_ops)
    conv_node = _find_fused_node_with_cur_add(node, conv_ops)
    if conv_gemm_node is None:
        # If pre_nodes don't have a gemm node, check whether there is a
        # quantizable node before the add; if not, no fake quant is inserted
        # before the add. (Ideally all input nodes are quantizable nodes.)
        if len(node.pre_nodes) > 0:
            add_1_has_pre_quantizable_op = _check_has_quantizable_node_before_node(
                node.pre_nodes[0]
            )
            add_2_has_pre_quantizable_op = False
            if len(node.pre_nodes) == 2:
                add_2_has_pre_quantizable_op = _check_has_quantizable_node_before_node(
                    node.pre_nodes[1]
                )
            if not (add_1_has_pre_quantizable_op and add_2_has_pre_quantizable_op):
                for idx, tensor_info in enumerate(node.input_tensor_infos):
                    tensor_info.inf_dtype = tensor_info.orig_dtype
                    node.input_tensor_force_inf_dtype[idx] = tensor_info.inf_dtype
        else:
            # No producers at all: leave every input at its original dtype.
            for idx, tensor_info in enumerate(node.input_tensor_infos):
                tensor_info.inf_dtype = tensor_info.orig_dtype
                node.input_tensor_force_inf_dtype[idx] = tensor_info.inf_dtype
    else:
        # add can be fused with gemm: the input coming from the gemm stays in
        # its original dtype (no fake quant inside the fusion pattern).
        if (
            node.input_tensor_infos[0] is not None
            and node.input_tensor_infos[0] in conv_gemm_node.output_tensor_infos
        ):
            node.input_tensor_infos[0].inf_dtype = node.input_tensor_infos[0].orig_dtype
            node.input_tensor_force_inf_dtype[0] = node.input_tensor_infos[0].inf_dtype
            # TODO: set another input's dtype for conv nodes when oneDNN is ready.
            if conv_node is None or not _check_has_quantizable_node_after_node(node):
                # set the other input's dtype: if it comes from a
                # non-quantizable op, the fake quant can be removed.
                reset_input_inf_dtype_to_orig_dtype(node, 1)
        elif (
            node.input_tensor_infos[1] is not None
            and node.input_tensor_infos[1] in conv_gemm_node.output_tensor_infos
        ):
            node.input_tensor_infos[1].inf_dtype = node.input_tensor_infos[1].orig_dtype
            node.input_tensor_force_inf_dtype[1] = node.input_tensor_infos[1].inf_dtype
            # TODO: set another input's dtype for conv nodes when oneDNN is ready.
            if conv_node is None or not _check_has_quantizable_node_after_node(node):
                # set the other input's dtype: if it comes from a
                # non-quantizable op, the fake quant can be removed.
                reset_input_inf_dtype_to_orig_dtype(node, 0)
# get a default recipe
def get_default_recipe(nodes):
r"""
This function is about get default recipe which set where fake quant is inserted for the quantizable ops.
"""
# step1: Quantization state init. Quantize inputs before quantizable node by setting their input's inf_dtype to
# qconfig.activation().dtype, and also setting the weight's inf_dtype to
# qconfig.weight().dtype if a module has a weight.
_default_recipe_init(nodes)
# step2: Optimization
# 1. For conv, gemm, and LSTM, we always quantize its' inputs and weight, so we keep them state.
# and for embedding_bag, which only has a weight, we always quantize it's weight to
# save memory space and bandwidth, we also keep it's state.
# 2. For remaining quantizable ops (pooling, elt-wise op and add) which meet the following requirements, we will
# update them inputs' quantization state.
# 1. If it is a part of a quantized fusion pattern, don't need to quantize any inputs from inside the pattern.
# 2. If any of its inputs outside the fusion pattern are from non-quantized op, don't quantize all inputs outside the pattern.
# 3. If it is not part of a quantized fusion pattern, don't quantize all inputs if its one input from non-quantized op.
# 3. For quantizable ops (pooling, relu, flatten, interation and embedding) forcing quantized output, need to \
# quantize its output if it is quantized.
# 4. For interation and embedding, we only support s8->s8 symmetric quantization, so if doesn't meet the \
# requiresments, don't need to quantize its inputs.
# Note: the fusion pattern we are supported is conv/gemm/add + elt-wise, conv/gemm + add, conv/gemm + add + elt-wise.
# which means some ops can be combined with a single op to compute, but they are mathematically equivalent.
embedding_bag_ops = [
str(torch.embedding_bag),
str(F.embedding_bag),
str(torch.nn.EmbeddingBag),
]
for node in nodes:
if isinstance(node, ParentNode):
continue
if node.qconfig is not None and node.type not in (
conv_gemm_ops + rnn_ops + embedding_bag_ops
):
if node.type in add_ops:
# gemm+add fusion
_add_recipe(node)
elif node.type in elt_wise_q_ops:
# don't have a pre_node, we can say it doesn't have a pre quantizable node.
has_pre_quantized_node = True
# If Has gemm(add) pre_op can be fused, not insert fake quant.
if len(node.pre_nodes) > 0:
if (
_find_fused_node_with_cur_elt_wise(
node, conv_gemm_ops + add_ops + add_inplace_ops
)
is not None
):
has_pre_quantized_node = False
else:
has_pre_quantized_node = (
_check_has_quantizable_node_before_node(node.pre_nodes[0])
)
else:
has_pre_quantized_node = False
if not has_pre_quantized_node:
node.input_tensor_infos[0].inf_dtype = node.input_tensor_infos[
0
].orig_dtype
node.input_tensor_force_inf_dtype[0] = node.input_tensor_infos[
0
].inf_dtype
else:
# For other quantizable node, we don't need add fake quant before it if it's pre node is one none-quantizable op.
# Now all other quantizable node only have one input info, so we can check the one pre input node info to check
# whether has a pre quantizable node.
has_pre_quantized_node = True
if len(node.pre_nodes) == 1:
has_pre_quantized_node = _check_has_quantizable_node_before_node(
node.pre_nodes[0]
)
elif len(node.pre_nodes) == 0:
has_pre_quantized_node = False
# the node's pre node doesn't support int8 output.
if not has_pre_quantized_node:
node.input_tensor_infos[0].inf_dtype = node.input_tensor_infos[
0
].orig_dtype
node.input_tensor_force_inf_dtype[0] = node.input_tensor_infos[
0
].inf_dtype
set_node_output_quantized(nodes)
| 23,582 | 41.64557 | 132 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/intel_extension_for_pytorch/quantization/_quantization_state.py | from typing import Callable, List, Tuple, Any, Optional, Dict
import torch
import torch.nn.functional as F
import intel_extension_for_pytorch._C as core
from ._utils import (
OpQuantizeabilityType,
is_leaf,
get_fqn_valid_for_module_dict_key,
quantized_modules_has_weights,
int8_int8_ops,
)
from ._quantization_state_utils import (
SeenQOpInfo,
SeenNonQOpInfo,
QTensorInfo,
op_needs_quantization,
get_input_observed_arg_idxs,
get_weight_arg_idx,
iterate_and_apply,
get_input_args_quant_dequant_info,
_raise_obs_not_found_error,
get_weight_args_quant_dequant_info,
_raise_obs_op_mismatch,
ops_are_related,
iterate_and_apply_convert,
set_tensor_info_dtype,
)
from ._smooth_quant import SmoothQuantActivationObserver, SmoothQuantWeightObserver
# Per-op conversion info, computed right before convert (see
# calculate_op_convert_info / op_convert_before_hook usage below).
OpConvertInfo = Tuple[
    # quantized equivalent of original op (None means keep original)
    # Optional[Callable],
    # arg_quant_infos, each element is (scale, zp, dtype) for quantized and None otherwise
    List[Optional[Tuple[float, int, torch.dtype]]],
    # per-argument boolean flags (semantics defined at the use sites)
    List[bool],
]
# TODO(future PR): maybe better name
# TODO(future PR): add serialization support
class AutoQuantizationState(torch.nn.Module):
"""
Contains state necessary to perform auto quantization on the parent
`nn.Module` instance.
"""
idx: int
    def __init__(self, fqn: str, qconfig: torch.ao.quantization.QConfig):
        """
        Args:
            fqn: fully-qualified name of the parent module this state belongs to.
            qconfig: quantization config supplying activation/weight observers.
        """
        super().__init__()
        # Current op index within a single top-level call; reset per call.
        self.idx = 0
        self.qconfig = qconfig
        self.fqn = fqn
        # this is a ModuleDict in order to properly register observers
        # to be within the module hierarchy.
        self.tensor_id_to_observer = torch.nn.ModuleDict()
        self.weight_tensor_id_to_observer = torch.nn.ModuleDict()
        # TODO(future PR): include kwargs
        # Note: seen quantizeable ops are recorded with an index,
        # because we enforce order of execution. However, seen
        # unquantizeable ops are recorded without an index, because
        # we do not enforce order of execution.
        self.idx_to_seen_q_op_infos: Dict[int, SeenQOpInfo] = {}
        self.seen_nonq_op_infos: List[SeenNonQOpInfo] = []
        # qtensor_info objects of tensor outputs of the module, specified
        # in order of iteration through the output type. Non-tensor outputs
        # are represented with `None`.
        self.output_qtensor_infos: List[Optional[QTensorInfo]] = []
        # note: this is filled out right before convert
        self.tensor_id_to_scale_zp: Dict[int, Tuple[torch.Tensor, torch.Tensor]] = {}
        self.idx_to_op_convert_info: Dict[int, OpConvertInfo] = {}
        # Weight variants of the above, keyed by "<op idx>_<tensor id>" strings.
        self.weight_tensor_id_to_scale_zp: Dict[
            str, Tuple[torch.Tensor, torch.Tensor]
        ] = {}
        self.idx_to_op_weight_convert_info: Dict[int, OpConvertInfo] = {}
        # SmoothQuant bookkeeping: per-tensor scaling factors and the set of
        # ops whose weights were already rescaled.
        self.tensor_id_to_smooth_quant_scaling_factor: Dict[int, torch.Tensor] = {}
        self.weight_tensor_id_to_smooth_quant_scaling_factor: Dict[
            int, torch.Tensor
        ] = {}
        self.idx_to_smooth_quant_scaling_factor: Dict[str, torch.Tensor] = {}
        self.idx_to_weight_updated_for_smooth_quant: set[str] = set()
def get_extra_state(self):
return {"tensor_id_to_scale_zp": self.tensor_id_to_scale_zp}
def set_extra_state(self, state):
self.tensor_id_to_scale_zp = state["tensor_id_to_scale_zp"]
for _, seen_q_op_info in self.idx_to_seen_q_op_infos.items():
self.idx_to_op_convert_info[
seen_q_op_info.idx
] = self.calculate_op_convert_info(seen_q_op_info)
def has_at_least_one_seen_q_op_info(self) -> bool:
return len(self.idx_to_seen_q_op_infos) > 0
def validate_is_at_last_seen_idx(self) -> None:
is_at_last_seen_idx = len(self.idx_to_seen_q_op_infos) == 0 or self.idx == len(
self.idx_to_seen_q_op_infos
)
if not is_at_last_seen_idx:
raise AssertionError(
f"Cur idx: {self.idx}, expected idx: {len(self.idx_to_seen_q_op_infos)}"
)
def extra_repr(self) -> str:
s = ""
# idx_to_seen_q_op_infos
if len(self.idx_to_seen_q_op_infos):
s += "(seen_q_op_infos): {\n"
for k, v in self.idx_to_seen_q_op_infos.items():
s += f" {k}: {v}\n"
s += "}\n"
else:
s += "(seen_q_op_infos): {}\n"
if len(self.seen_nonq_op_infos):
s += "(seen_nonq_op_infos): {\n"
for n in self.seen_nonq_op_infos:
s += f" {n}\n"
s += "}\n"
else:
s += "(seen_nonq_op_infos): {}\n"
# output_qtensor_infos
s += "(output_qtensor_infos): ["
for i in self.output_qtensor_infos:
s += f"{i} "
s += "]\n"
if len(self.tensor_id_to_scale_zp):
s += "(tensor_id_to_scale_zp): {\n"
for k, v in self.tensor_id_to_scale_zp.items(): # type: ignore[assignment]
s += f" {k}: {v}\n"
s += "}\n"
if len(self.weight_tensor_id_to_scale_zp):
s += "(weight_tensor_id_to_scale_zp): {\n"
for k, v in self.weight_tensor_id_to_scale_zp.items(): # type: ignore[assignment]
s += f" {k}: {v}\n"
s += "}"
return s
def _get_cur_seen_q_op_info(self):
return self.idx_to_seen_q_op_infos[self.idx]
def get_cur_output_inf_dtype(self):
return self._get_cur_seen_q_op_info().output_tensor_infos[0].inf_dtype
def reset_to_new_call(self):
"""
Resets the internal op counter to start a new top level module call
"""
# torch.nn.Module __setattr__ has overhead,
# this code is the explicit fast path for `self.idx = 0`
object.__setattr__(self, "idx", 0)
def cur_op_needs_hooks(self, cur_op: Callable) -> bool:
return op_needs_quantization(cur_op)
def validate_cur_op(self, cur_op: Callable) -> None:
"""
This function is expected to be called before any new function or
module call which needs hooks. It validates that the new function or
module is of the expected type based on the order of execution.
"""
try:
seen_q_op_info = self._get_cur_seen_q_op_info()
expected_op = seen_q_op_info.type
except IndexError:
_raise_obs_not_found_error(cur_op)
if not ops_are_related(cur_op, expected_op, seen_q_op_info.type_is_module):
_raise_obs_op_mismatch(cur_op, expected_op)
def mark_cur_op_complete(self, cur_op: Callable) -> None:
"""
This function is expected to be called after a function or module
processing is complete.
"""
# torch.nn.Module __setattr__ has overhead,
# this code is the explicit fast path for `self.idx += 1`
object.__setattr__(self, "idx", self.idx + 1)
def first_call_outputs_prepare_hook(
self,
outputs: Any,
qtensor_id: List[int],
) -> Any:
"""
This function is expected to be called on the outputs of a prepared
module right before they are returned to the parent, during tracing.
"""
outputs = self._first_call_assign_qtensor_infos_to_mod_outputs(
outputs, qtensor_id
)
return outputs
def outputs_prepare_hook(
self,
outputs: Any,
) -> Any:
"""
This function is expected to be called on the outputs of a prepared
module right before they are returned to the parent.
"""
return outputs
def outputs_convert_hook(
self,
outputs: Any,
) -> Any:
"""
This function is expected to be called on the outputs of a converted
module right before they are returned to the parent.
"""
# outputs = self._maybe_mod_outputs_dtype_transform(outputs)
return outputs
def first_call_op_prepare_before_hook(
self,
op: Callable,
args: Tuple[Any, ...],
kwargs: Dict[str, Any],
qtensor_id: List[int],
fqn: str,
root_module: torch.nn.Module,
op_quantizeability_type: OpQuantizeabilityType,
) -> Tuple[Tuple[Any, ...], Dict[str, Any]]:
"""
This function is expected to be called on args and kwargs of
`op` directly before `op` is executed, during tracing.
We record the type of `op`
and the IDs of its tensor inputs. Note: we add a placeholder for IDs
of tensor outputs, the placeholder will be filled out during the
`op_prepare_after_hook`.
The function returns modified `args` and `kwargs`.
"""
return self._first_call_op_prepare_before_hook_create_subgraphs(
op, args, kwargs, qtensor_id, fqn, root_module, op_quantizeability_type
)
    def op_prepare_before_hook(
        self,
        op: Callable,
        args: Tuple[Any, ...],
        kwargs: Dict[str, Any],
    ) -> Tuple[Tuple[Any, ...], Dict[str, Any]]:
        """
        This function is expected to be called on args and kwargs of
        `op` directly before `op` is executed.
        We do the following:
        * pass the inputs through observers, if needed
        The function returns modified `args` and `kwargs`.
        """
        seen_q_op_info = self._get_cur_seen_q_op_info()

        def _maybe_observe(arg, tensor_info):
            # Feed `arg` through the observer registered for its tensor ID,
            # if any; args without an observer pass through unchanged.
            tensor_id = tensor_info.id
            # TODO: do not run this twice on input and output
            if str(tensor_id) in self.tensor_id_to_observer:
                observer = self.tensor_id_to_observer[str(tensor_id)]
                # Observers operate in fp32: temporarily upcast non-fp32
                # tensors (e.g. bf16 under autocast), then restore the dtype.
                if isinstance(arg, torch.Tensor) and arg.dtype != torch.float32:
                    dtype = arg.dtype
                    out = observer(arg.float())
                    return out.to(dtype)
                return observer(arg)
            else:
                return arg

        # If user changes observer's dtype and re-do calibration, we need to update
        # the tensor_info.inf_dtype and force dtype with the new observer's dtype.
        quantized_dtype = [torch.quint8, torch.qint8]
        for i, tensor_info in enumerate(seen_q_op_info.input_tensor_infos):
            if (
                tensor_info is not None
                and str(tensor_info.id) in self.tensor_id_to_observer
            ):
                tensor_id = tensor_info.id
                observer = self.tensor_id_to_observer[str(tensor_id)]
                set_tensor_info_dtype(tensor_info, observer)
                force_dtype = seen_q_op_info.input_tensor_force_inf_dtype[i]
                # Only override a forced *quantized* dtype that no longer
                # matches either the original dtype or the observer's dtype.
                if (
                    force_dtype in quantized_dtype
                    and force_dtype != tensor_info.orig_dtype
                    and force_dtype != observer.dtype
                ):
                    seen_q_op_info.input_tensor_force_inf_dtype[i] = observer.dtype
        # Observe each positional input paired with its recorded tensor info.
        args = iterate_and_apply(
            args, seen_q_op_info.input_tensor_infos, _maybe_observe
        )
        # works for nn.module case: observe module weights as well.
        weight_tensor_info = seen_q_op_info.weight_tensor_infos
        for i, tensor_info in enumerate(weight_tensor_info):
            if tensor_info is None:
                continue
            tensor_id = tensor_info.id
            # Weight observers are keyed "<op idx>_<weight tensor id>".
            if (
                str(seen_q_op_info.idx) + "_" + str(tensor_id)
                in self.weight_tensor_id_to_observer
            ):
                observer = self.weight_tensor_id_to_observer[
                    str(seen_q_op_info.idx) + "_" + str(tensor_id)
                ]
                set_tensor_info_dtype(tensor_info, observer)
                # if has bias, the dim is 1, we don't need run observer for it.
                if isinstance(op, torch.nn.LSTM):
                    # LSTM stores weights/biases flattened; index i matches the
                    # position in `_flat_weights`.
                    if op._flat_weights[i].dim() > 1:
                        observer(op._flat_weights[i])
                    else:
                        pass
                else:
                    observer(op.weight)
        return args, kwargs
def first_call_op_prepare_after_hook(
self,
op: Callable,
output: Any,
args: Tuple[Any, ...],
qtensor_id: List[int],
op_quantizeability_type: OpQuantizeabilityType,
) -> Any:
"""
This function is called after an op call on a prepared model.
* create an observer for the output, if needed, and record it in
`tensor_id_to_observer`
* amend the current seen op with the tensor ID of the output
"""
self._first_call_op_prepare_after_hook_adjust_subgraphs(
op, output, args, qtensor_id, op_quantizeability_type
)
return output
def op_prepare_after_hook(
self,
op: Callable,
outputs: Any,
args: Tuple[Any, ...],
global_op_idx: List[int],
) -> Any:
"""
This function is called after an op call on a prepared model.
* observe the output, if needed, which only works for OpQuantizeabilityType.QUANTIZEABLE.
TODO: remove this after all ops support INT8->FP32.
"""
seen_q_op_info = self._get_cur_seen_q_op_info()
def _observer_output(output, tensor_info):
tensor_id = tensor_info.id
if str(tensor_id) in self.tensor_id_to_observer:
observer = self.tensor_id_to_observer[str(tensor_id)]
set_tensor_info_dtype(tensor_info, observer)
observer(output.float())
if isinstance(outputs, torch.Tensor):
tensor_info = seen_q_op_info.output_tensor_infos[0]
_observer_output(outputs, tensor_info)
elif isinstance(outputs, tuple):
idx = 0
for element in outputs:
# only do observer for tensor type.
if isinstance(element, torch.Tensor):
tensor_info = seen_q_op_info.output_tensor_infos[idx]
_observer_output(element, tensor_info)
idx += 1
return outputs
    def op_convert_before_hook(
        self,
        op: Callable,
        args: Tuple[Any, ...],
        kwargs: Dict[str, Any],
        root_module: torch.nn.Module,
    ) -> Tuple[Callable, Tuple[Any, ...], Dict[str, Any]]:
        """
        This function is called before an op call in a converted model.
        For each arg in `args`, quantizes it if necessary.
        Returns potentially modified `op`, potentially modified `args`,
        potentially modified `kwargs`.
        """
        # TODO generalize this for more things
        # currently:
        # * can quantize args (via arg_quant_infos)
        # * can add scale and zp (via additional kwargs)
        arg_quant_infos, any_arg_quant_or_dequant_needed = self.get_op_convert_info(op)
        # Insert mul before nn.Linear for SmoothQuant: scale the first
        # positional input by the precomputed activation scaling factors.
        act_key = str(self.idx)
        if act_key in self.idx_to_smooth_quant_scaling_factor:
            act_scaling_factors = self.idx_to_smooth_quant_scaling_factor[act_key]
            if act_scaling_factors is not None:
                # NOTE: `args` becomes a list here; the downstream
                # `iterate_and_apply_convert` apparently accepts either —
                # confirm if `args` must remain a tuple for other callers.
                args = list(args)
                new_act = torch.mul(args[0], act_scaling_factors)
                args[0] = new_act
        # Quantize/dequantize each arg according to its precalculated info.
        args = iterate_and_apply_convert(
            args, arg_quant_infos, any_arg_quant_or_dequant_needed, op
        )
        # `op` itself is returned unchanged.
        return op, args, kwargs
    def op_weight_convert_before_hook(
        self,
        op: Callable,
    ) -> List[Any]:
        """
        This function is called before an op call in a converted model.

        Builds and returns the list of weight (and, for LSTM, bias) arguments
        for `op`, fake-quantizing each weight (quantize + dequantize with its
        recorded scale/zp) when convert info requires it. Under CPU autocast
        with bfloat16, weights are upcast to fp32 for quantization and cast
        back to bfloat16 afterwards. For nn.Linear with SmoothQuant enabled,
        the weight is rescaled by the per-IC scaling factors first.

        Note: despite the old annotation, the return value is the list of
        converted weight args, not an (op, args, kwargs) tuple.
        """
        (
            arg_quant_infos,
            any_arg_quant_or_dequant_needed,
        ) = self.get_op_weight_convert_info(op)
        new_args = []
        if type(op) in [
            torch.nn.Conv2d,
            torch.nn.Conv3d,
            torch.nn.ConvTranspose2d,
            torch.nn.ConvTranspose3d,
            torch.nn.Linear,
        ]:
            # Single-weight modules: one quant info entry for `op.weight`.
            tensor_arg_idx = 0
            quant_info = arg_quant_infos[tensor_arg_idx]
            if (
                quant_info is not None
                and any_arg_quant_or_dequant_needed[tensor_arg_idx]
            ):
                scale, zp, dtype = quant_info
                weight = op.weight
                ch_axis = 0
                # conv_transpose weights are laid out io... so the per-channel
                # axis is 1 rather than 0.
                if type(op) in [torch.nn.ConvTranspose2d, torch.nn.ConvTranspose3d]:
                    ch_axis = 1
                # Update weight of nn.Linear for SmoothQuant
                wei_key = str(self.idx) + "_0"
                if wei_key in self.idx_to_smooth_quant_scaling_factor:
                    wei_scaling_factors = self.idx_to_smooth_quant_scaling_factor[
                        wei_key
                    ]
                    if wei_scaling_factors is not None:
                        # Do the rescale in fp32, then restore original dtype.
                        w_dtype = weight.dtype
                        if w_dtype != torch.float32:
                            weight = weight.to(torch.float32)
                        weight = torch.mul(weight, wei_scaling_factors)
                        if w_dtype != torch.float32:
                            weight = weight.to(w_dtype)
                if (
                    torch.is_autocast_cpu_enabled()
                    and core.get_autocast_dtype() == torch.bfloat16
                ):
                    # quantize_per_* requires fp32 input; cast back to bf16
                    # after the fake-quant round trip.
                    if weight.dtype == torch.bfloat16:
                        weight = weight.to(dtype=torch.float32)
                    if scale.numel() > 1:
                        arg = torch.quantize_per_channel(
                            weight, scale, zp, ch_axis, dtype
                        )
                    else:
                        arg = torch.quantize_per_tensor(
                            weight, scale.item(), zp.item(), dtype
                        )
                    arg = arg.dequantize()
                    arg = arg.to(dtype=torch.bfloat16)
                else:
                    if scale.numel() > 1:
                        arg = torch.quantize_per_channel(
                            weight, scale, zp, ch_axis, dtype
                        )
                    else:
                        arg = torch.quantize_per_tensor(
                            weight, scale.item(), zp.item(), dtype
                        )
                    arg = arg.dequantize()
                new_args.append(arg)
            else:
                # No conversion required: pass the raw weight through.
                new_args.append(op.weight)
        elif isinstance(op, torch.nn.EmbeddingBag):
            # EmbeddingBag: always per-tensor quantization of the weight.
            tensor_arg_idx = 0
            quant_info = arg_quant_infos[tensor_arg_idx]
            if (
                quant_info is not None
                and any_arg_quant_or_dequant_needed[tensor_arg_idx]
            ):
                scale, zp, dtype = quant_info
                weight = op.weight
                if (
                    torch.is_autocast_cpu_enabled()
                    and core.get_autocast_dtype() == torch.bfloat16
                ):
                    if weight.dtype == torch.bfloat16:
                        weight = weight.to(dtype=torch.float32)
                    arg = torch.quantize_per_tensor(
                        weight, scale.item(), zp.item(), dtype
                    )
                    arg = arg.dequantize()
                    arg = arg.to(dtype=torch.bfloat16)
                else:
                    arg = torch.quantize_per_tensor(
                        op.weight, scale.item(), zp.item(), dtype
                    )
                    arg = arg.dequantize()
                new_args.append(arg)
            else:
                new_args.append(op.weight)
        elif isinstance(op, torch.nn.LSTM):
            # LSTM flat weights come in groups per layer/direction:
            # (w_ih, w_hh) plus (b_ih, b_hh) when bias is enabled.
            step = 4 if op.bias else 2
            weights = op._flat_weights
            for tensor_arg_idx in range(0, len(arg_quant_infos), step):
                quant_info = arg_quant_infos[tensor_arg_idx]
                if (
                    quant_info is not None
                    and any_arg_quant_or_dequant_needed[tensor_arg_idx]
                ):
                    w_ih = weights[tensor_arg_idx]
                    w_hh = weights[tensor_arg_idx + 1]
                    w_ih_scale, w_ih_zp, w_ih_dtype = quant_info
                    w_hh_scale, w_hh_zp, w_hh_dtype = arg_quant_infos[
                        tensor_arg_idx + 1
                    ]
                    if (
                        torch.is_autocast_cpu_enabled()
                        and core.get_autocast_dtype() == torch.bfloat16
                    ):
                        weight_if_bf16 = w_ih.dtype == torch.bfloat16
                        if weight_if_bf16:
                            w_ih = w_ih.to(dtype=torch.float32)
                            w_hh = w_hh.to(dtype=torch.float32)
                        if w_ih_scale.numel() > 1:
                            w_ih = torch.quantize_per_channel(
                                w_ih, w_ih_scale, w_ih_zp, 0, w_ih_dtype
                            )
                            w_hh = torch.quantize_per_channel(
                                w_hh, w_hh_scale, w_hh_zp, 0, w_hh_dtype
                            )
                        else:
                            w_ih = torch.quantize_per_tensor(
                                w_ih, w_ih_scale.item(), w_ih_zp.item(), w_ih_dtype
                            )
                            w_hh = torch.quantize_per_tensor(
                                w_hh, w_hh_scale.item(), w_hh_zp.item(), w_hh_dtype
                            )
                        w_ih = w_ih.dequantize()
                        w_hh = w_hh.dequantize()
                        if weight_if_bf16:
                            w_ih = w_ih.to(dtype=torch.bfloat16)
                            w_hh = w_hh.to(dtype=torch.bfloat16)
                    else:
                        # NOTE(review): this branch passes scale/zp tensors
                        # (not `.item()`), relying on the Tensor overload of
                        # quantize_per_tensor — presumably intentional, but
                        # inconsistent with the autocast branch above.
                        if w_ih_scale.numel() > 1:
                            w_ih = torch.quantize_per_channel(
                                w_ih, w_ih_scale, w_ih_zp, 0, w_ih_dtype
                            )
                            w_hh = torch.quantize_per_channel(
                                w_hh, w_hh_scale, w_hh_zp, 0, w_hh_dtype
                            )
                        else:
                            w_ih = torch.quantize_per_tensor(
                                w_ih, w_ih_scale, w_ih_zp, w_ih_dtype
                            )
                            w_hh = torch.quantize_per_tensor(
                                w_hh, w_hh_scale, w_hh_zp, w_hh_dtype
                            )
                        w_ih = w_ih.dequantize()
                        w_hh = w_hh.dequantize()
                    new_args.append(w_ih)
                    new_args.append(w_hh)
                    # Biases are never quantized; forward them untouched.
                    if op.bias:
                        new_args.append(weights[tensor_arg_idx + 2])
                        new_args.append(weights[tensor_arg_idx + 3])
                else:
                    # No conversion for this group: forward the whole group.
                    for s in range(step):
                        new_args.append(weights[tensor_arg_idx + s])
        return new_args
    def op_convert_after_hook(
        self,
        op: Callable,
        outputs,
    ) -> Any:
        """
        This function is called after an op call in a converted model.

        Applies a fake-quant (quantize + dequantize) round trip to each
        tensor output that has a recorded scale/zp, a quantized inf_dtype,
        and the insert-fake-quant flag set. Other outputs pass through.
        """
        # we always add fakeQuant before the quantized op, but if one op doesn't support INT8->FP32,
        # we need add fakeQuant here to make the quantized op call in
        # INT8 path. It can be removed after all op support INT8->fp32
        seen_q_op_info = self._get_cur_seen_q_op_info()

        def _convert_output(
            output, tensor_info, insert_fake_quant, tensor_id_to_scale_zp
        ):
            # Fake-quantize one output tensor when all conditions hold;
            # otherwise return it unchanged.
            tensor_id, inf_dtype = tensor_info.id, tensor_info.inf_dtype
            # so if inf_dtype is torch.qint8, we need add fake quant here.
            if (
                tensor_id in tensor_id_to_scale_zp
                and inf_dtype in [torch.qint8, torch.quint8]
                and insert_fake_quant
            ):
                scale, zp = tensor_id_to_scale_zp[tensor_id]
                # quantize_per_tensor needs fp32; round-trip bf16 through fp32.
                output_is_bfloat16 = False
                if output.dtype == torch.bfloat16:
                    output_is_bfloat16 = True
                    output = output.to(torch.float32)
                output = torch.quantize_per_tensor(
                    output, scale.item(), zp.item(), inf_dtype
                )
                output = output.dequantize()
                if output_is_bfloat16:
                    output = output.to(torch.bfloat16)
            return output

        if isinstance(outputs, torch.Tensor):
            tensor_info = seen_q_op_info.output_tensor_infos[0]
            insert_fake_quant = seen_q_op_info.insert_fake_quant_after_outputs[0]
            outputs = _convert_output(
                outputs, tensor_info, insert_fake_quant, self.tensor_id_to_scale_zp
            )
        elif isinstance(outputs, tuple):
            # TODO: handle other tuple subclasses more generically
            new_outputs = []
            # `idx` counts only tensor elements: output_tensor_infos is
            # indexed by tensor position, not element position.
            idx = 0
            for output in outputs:
                if isinstance(output, torch.Tensor):
                    tensor_info = seen_q_op_info.output_tensor_infos[idx]
                    insert_fake_quant = seen_q_op_info.insert_fake_quant_after_outputs[
                        idx
                    ]
                    output = _convert_output(
                        output,
                        tensor_info,
                        insert_fake_quant,
                        self.tensor_id_to_scale_zp,
                    )
                    new_outputs.append(output)
                    idx += 1
                else:
                    new_outputs.append(output)
            # hacky check for collections.namedtuple, TODO improve this
            # https://stackoverflow.com/questions/2166818/how-to-check-if-an-object-is-an-instance-of-a-namedtuple
            if hasattr(outputs, "_fields"):
                outputs = outputs.__class__(*new_outputs)
            else:
                outputs = tuple(new_outputs)
        else:
            # Non-tensor, non-tuple outputs are returned untouched.
            pass
        return outputs
def get_op_convert_info(
self,
op: Callable,
) -> OpConvertInfo:
"""
Returns the information needed for convert time modifications to `op`.
"""
return self.idx_to_op_convert_info[self.idx]
def get_op_weight_convert_info(
self,
op: Callable,
) -> OpConvertInfo:
"""
Returns the information needed for convert time modifications to `op`.
"""
return self.idx_to_op_weight_convert_info[self.idx]
def calculate_op_convert_info(
self,
seen_q_op_info: SeenQOpInfo,
) -> OpConvertInfo:
"""
This precalculates the information which will be returned by
`get_op_convert_info`.
"""
# calculate quant infos
(
arg_quant_infos,
any_arg_quant_or_dequant_needed,
) = get_input_args_quant_dequant_info(
seen_q_op_info, self.tensor_id_to_scale_zp
)
return (
arg_quant_infos,
any_arg_quant_or_dequant_needed,
)
def calculate_op_weight_convert_info(
self,
seen_q_op_info: SeenQOpInfo,
) -> OpConvertInfo:
"""
This precalculates the information which will be returned by
`get_op_convert_info`.
"""
# calculate quant infos
(
arg_quant_infos,
any_arg_quant_or_dequant_needed,
) = get_weight_args_quant_dequant_info(
seen_q_op_info, self.weight_tensor_id_to_scale_zp
)
return (
arg_quant_infos,
any_arg_quant_or_dequant_needed,
)
def _get_packed_param_name(self, seen_q_op_info: SeenQOpInfo) -> Optional[str]:
"""
If the op in seen_q_op_info has a quantized packed param, returns it.
Otherwise, returns None.
"""
return self.idx_to_packed_weight_name.get(seen_q_op_info.idx, None)
def _first_call_assign_qtensor_infos_to_mod_outputs_tensor(
self,
output: torch.Tensor,
qtensor_id: List[int],
) -> torch.Tensor:
"""
This is a helper function for _first_call_assign_qtensor_infos_to_mod_outputs
to handle iterables of tensors without code duplication.
"""
if not hasattr(output, "_qtensor_info"):
output._qtensor_info = QTensorInfo( # type: ignore[attr-defined]
qtensor_id[0], output.dtype, output.dtype
)
qtensor_id[0] += 1
self.output_qtensor_infos.append(output._qtensor_info) # type: ignore[attr-defined]
return output
def _first_call_assign_qtensor_infos_to_mod_outputs(
self,
outputs: Any,
qtensor_id: List[int],
) -> Any:
"""
Takes `outputs`, which are a set of values about to be returned from
the current module. If `_qtensor_info` attributes do not already exist
on any tensors in `outputs`, this function adds them, initializing the
dtype to `torch.float`. This allows us to reason about module output
dtypes even if the last op in the module is not quantizeable.
"""
# TODO: handle objects with deeper nested tensors
if isinstance(outputs, torch.Tensor):
self._first_call_assign_qtensor_infos_to_mod_outputs_tensor(
outputs, qtensor_id
)
elif isinstance(outputs, tuple):
# TODO: handle other tuple subclasses more generically
new_outputs = []
for output in outputs:
if isinstance(output, torch.Tensor):
new_outputs.append(
self._first_call_assign_qtensor_infos_to_mod_outputs_tensor(
output, qtensor_id
)
)
else:
new_outputs.append(output)
# hacky check for collections.namedtuple, TODO improve this
# https://stackoverflow.com/questions/2166818/how-to-check-if-an-object-is-an-instance-of-a-namedtuple
if hasattr(outputs, "_fields"):
outputs = outputs.__class__(*new_outputs)
else:
outputs = tuple(new_outputs)
else:
pass
return outputs
def _first_call_op_prepare_before_hook_create_subgraphs_tensor(
self,
op: Callable,
arg: Any,
arg_tensor_infos: List[Optional[QTensorInfo]],
arg_tensor_force_inf_dtype: List[Optional[torch.dtype]],
qtensor_id: List[int],
) -> None:
"""
Runs the prepare hook during first_call for individual
tensors. If the input argument is a tensor, this function is
called directly. If the input argument is an iterable such
as a list or a tuple, this function is called on each element of
the iteratble.
"""
# TODO(next): fix this for torch.cat
if not isinstance(arg, torch.Tensor):
arg_tensor_infos.append(None)
arg_tensor_force_inf_dtype.append(None)
return
# If a tensor does not have an ID, add it. This allows
# us to track inputs shared by multiple quantizeable modules.
if not hasattr(arg, "_qtensor_info"):
arg._qtensor_info = QTensorInfo( # type: ignore[attr-defined]
qtensor_id[0], arg.dtype, arg.dtype
)
qtensor_id[0] += 1
arg_tensor_infos.append(arg._qtensor_info) # type: ignore[attr-defined]
arg_tensor_force_inf_dtype.append(arg.dtype)
    def _first_call_op_prepare_before_hook_create_subgraphs(
        self,
        op: Callable,
        args: Tuple[Any, ...],
        kwargs: Dict[str, Any],
        qtensor_id: List[int],
        fqn: str,
        root_module: torch.nn.Module,
        op_quantizeability_type: OpQuantizeabilityType,
    ) -> Tuple[Tuple[Any, ...], Dict[str, Any]]:
        """
        Given an op, args, kwargs about to be executed, records the subgraph
        of this op in `self`.

        Quantizeable ops are recorded (once per execution index) in
        `idx_to_seen_q_op_infos`; unquantizeable ops are appended to
        `seen_nonq_op_infos`. Returns `args` and `kwargs` unchanged.
        """
        # Collect per-argument tensor metadata; list/tuple args are flattened
        # one level so each inner element gets its own entry.
        arg_tensor_infos: List[Optional[QTensorInfo]] = []
        arg_tensor_force_inf_dtype: List[Optional[torch.dtype]] = []
        for arg in args:
            if isinstance(arg, (list, tuple)):
                for inner_arg in arg:
                    self._first_call_op_prepare_before_hook_create_subgraphs_tensor(
                        op,
                        inner_arg,
                        arg_tensor_infos,
                        arg_tensor_force_inf_dtype,
                        qtensor_id,
                    )
            else:
                self._first_call_op_prepare_before_hook_create_subgraphs_tensor(
                    op, arg, arg_tensor_infos, arg_tensor_force_inf_dtype, qtensor_id
                )

        # Unquantizeable ops are recorded without an index (no order enforced).
        if op_quantizeability_type is OpQuantizeabilityType.NOT_QUANTIZEABLE:
            op_type_is_module = isinstance(op, torch.nn.Module)
            op_type: Callable = type(op) if op_type_is_module else op  # type: ignore[assignment]
            self.seen_nonq_op_infos.append(
                SeenNonQOpInfo(str(op_type), fqn, arg_tensor_infos, [])
            )
            return args, kwargs

        if self.idx not in self.idx_to_seen_q_op_infos:
            op_type_is_module = isinstance(op, torch.nn.Module)
            op_type = type(op) if op_type_is_module else op  # type: ignore[assignment]
            # For weighted modules, record QTensorInfo entries for each weight;
            # LSTM keeps one entry per flat weight (w_ih/w_hh/biases).
            weight_tensor_infos = []
            weight_idx = 0
            if type(op) in quantized_modules_has_weights:
                if not isinstance(op, torch.nn.LSTM):
                    weight_tensor_infos.append(
                        QTensorInfo(weight_idx, op.weight.dtype, op.weight.dtype)
                    )
                else:
                    weights = op._flat_weights
                    for i in range(len(weights)):
                        weight_tensor_infos.append(
                            QTensorInfo(
                                weight_idx,
                                weights[weight_idx].dtype,
                                weights[weight_idx].dtype,
                            )
                        )
                        weight_idx += 1
            # Output infos and insert-fake-quant flags start empty; they are
            # filled in by the "after" hook once outputs exist.
            self.idx_to_seen_q_op_infos[self.idx] = SeenQOpInfo(
                self.idx,
                str(op_type),
                op_type_is_module,
                fqn,
                arg_tensor_infos,
                arg_tensor_force_inf_dtype,
                [],
                [],
                weight_tensor_infos,
                self.qconfig,
            )

        return args, kwargs
    def _first_call_op_prepare_after_hook_adjust_subgraphs(
        self,
        op: Callable,
        outputs: Any,
        args: Tuple[Any, ...],
        qtensor_id: List[int],
        op_quantizeability_type: OpQuantizeabilityType,
    ) -> None:
        """
        After `op` was just executed, modifies the subgraph recorded
        for this op with the information about the output. Note, this
        has to be done in the "after" hook because the output of the op
        does not exist in the "before" hook.
        """

        # TODO(future PR): handle non-tensor outputs
        def _add_output_qtensor_info(output):
            # Allocate a fresh tensor ID for this output (overwrites any
            # existing `_qtensor_info`) and append it to the op's output list.
            output._qtensor_info = QTensorInfo(
                qtensor_id[0], output.dtype, output.dtype
            )  # type: ignore[arg-type]
            if op_quantizeability_type is OpQuantizeabilityType.QUANTIZEABLE:
                target = self.idx_to_seen_q_op_infos[self.idx].output_tensor_infos
                # Default to no fake-quant after this output; convert-time
                # logic may flip the flag later.
                self.idx_to_seen_q_op_infos[
                    self.idx
                ].insert_fake_quant_after_outputs.append(False)
            else:
                # Non-quantizeable ops append to the most recently recorded
                # non-q entry (created by the matching "before" hook).
                target = self.seen_nonq_op_infos[-1].output_tensor_infos
            target.append(output._qtensor_info)
            qtensor_id[0] += 1

        if isinstance(outputs, torch.Tensor):
            _add_output_qtensor_info(outputs)
        elif isinstance(outputs, tuple):
            # Only tensor elements are recorded; other elements are skipped.
            for element in outputs:
                if isinstance(element, torch.Tensor):
                    _add_output_qtensor_info(element)
    def _maybe_insert_input_observers(self, seen_q_op_info: SeenQOpInfo):
        """
        Create and register observers for the inputs and weights of one
        recorded quantizeable op.

        Input observers are keyed by tensor ID in `tensor_id_to_observer`;
        weight observers are keyed "<op idx>_<weight tensor id>" in
        `weight_tensor_id_to_observer`. Also wires up the SmoothQuant
        activation/weight observer cross-references for nn.Linear.
        """
        input_observed_arg_idxs = get_input_observed_arg_idxs(
            seen_q_op_info.type, seen_q_op_info.type_is_module
        )
        qconfig = seen_q_op_info.qconfig
        # Set when an input tensor already has an observer (i.e. it is shared
        # with a previously seen op); used by the SmoothQuant wiring below.
        found_duplicate_input = False
        for idx, tensor_info in enumerate(seen_q_op_info.input_tensor_infos):
            if tensor_info is None:
                continue
            # Some ops only observe a subset of their args.
            if (
                input_observed_arg_idxs is not None
                and idx not in input_observed_arg_idxs
            ):
                continue
            if qconfig is None:
                # If qconfig is None, we do not need any input observers
                continue
            else:
                # always add observer if the op can be quantized.
                tensor_id = tensor_info.id  # type: ignore[attr-defined]
                weight_arg_idx = get_weight_arg_idx(seen_q_op_info.type)
                # avoid add weight observer for dynamic quantization
                # (PlaceholderObserver as the activation observer signals it).
                if idx == weight_arg_idx and not isinstance(
                    qconfig.activation(), torch.ao.quantization.PlaceholderObserver
                ):
                    # conv_transpose weight is iohw or iodhw, so we change the observer axis to 1.
                    if seen_q_op_info.type in [
                        str(F.conv_transpose2d),
                        str(F.conv_transpose3d),
                    ] and isinstance(
                        qconfig.weight(), torch.ao.quantization.PerChannelMinMaxObserver
                    ):
                        obs = qconfig.weight.with_args(ch_axis=1)()
                    else:
                        obs = qconfig.weight()
                else:
                    obs = qconfig.activation()
                if str(tensor_id) not in self.tensor_id_to_observer:
                    self.tensor_id_to_observer[str(tensor_id)] = obs
                else:
                    found_duplicate_input = True
        # add weight observer if the op is nn.module and has a weight.
        for tensor_info in seen_q_op_info.weight_tensor_infos:
            if tensor_info is None:
                continue
            if qconfig is None:
                # If qconfig is None, we do not need any input observers
                continue
            else:
                # always add observer if the op can be quantized.
                tensor_id = tensor_info.id  # type: ignore[attr-defined]
                if seen_q_op_info.type == str(torch.nn.EmbeddingBag):
                    # EmbeddingBag weight uses the *activation* observer.
                    obs = qconfig.activation()
                    self.weight_tensor_id_to_observer[
                        str(seen_q_op_info.idx) + "_" + str(tensor_id)
                    ] = obs
                elif not isinstance(
                    qconfig.activation(), torch.ao.quantization.PlaceholderObserver
                ):
                    # Same conv_transpose axis adjustment as for input weights.
                    if seen_q_op_info.type in [
                        str(torch.nn.ConvTranspose2d),
                        str(torch.nn.ConvTranspose3d),
                    ] and isinstance(
                        qconfig.weight(), torch.ao.quantization.PerChannelMinMaxObserver
                    ):
                        obs = qconfig.weight.with_args(ch_axis=1)()
                    else:
                        obs = qconfig.weight()
                    self.weight_tensor_id_to_observer[
                        str(seen_q_op_info.idx) + "_" + str(tensor_id)
                    ] = obs
        # LSTM, we don't know whether has bias or not, so we add observer for all them, but will not use them at convert step.
        # w_ih, w_hh share same observer, and b_ih, b_hh also share same observer
        if seen_q_op_info.type == str(torch.nn.LSTM):
            if qconfig is not None and not isinstance(
                qconfig.activation(), torch.ao.quantization.PlaceholderObserver
            ):
                for i in range(0, len(seen_q_op_info.weight_tensor_infos), 2):
                    tensor_id = seen_q_op_info.weight_tensor_infos[i].id
                    obs = qconfig.weight()
                    self.weight_tensor_id_to_observer[
                        str(seen_q_op_info.idx) + "_" + str(tensor_id)
                    ] = obs
                    self.weight_tensor_id_to_observer[
                        str(seen_q_op_info.idx) + "_" + str(tensor_id + 1)
                    ] = obs
        # SmoothQuant: Linear activation observer and weight observer should know each other
        if (
            seen_q_op_info.type == str(torch.nn.Linear)
            and qconfig is not None
            and isinstance(qconfig.activation(), SmoothQuantActivationObserver)
            and isinstance(qconfig.weight(), SmoothQuantWeightObserver)
        ):
            x_tensor_id = seen_q_op_info.input_tensor_infos[0].id
            w_tensor_id = seen_q_op_info.weight_tensor_infos[0].id
            x_obs = self.tensor_id_to_observer[str(x_tensor_id)]
            w_obs = self.weight_tensor_id_to_observer[
                str(seen_q_op_info.idx) + "_" + str(w_tensor_id)
            ]
            # Duplicate input:
            # (1) In modules like MHA, multiple linear layers may share the same activation tensor
            #     In other words, multiple weight tensors share one activation tensor
            #     In this case, we regard these weights as a single big tensor (i.e., concat along OC axis).
            #     When calculating scaling factor, consider per-IC min/max of the big tensor
            #     So, these weights share the same per-IC observer
            # (2) It is also possible that linear shares activation with some non-weighted op.
            #     In that case, x_obs.weight_obs is not set. Also check it here.
            if not found_duplicate_input or x_obs.weight_obs is None:
                x_obs.weight_obs = w_obs.ic_obs
            else:
                # The input (activation) has been used by other linear ops
                # Weight should share the same per-IC observer with that linear
                w_obs.ic_obs = x_obs.weight_obs
            # In all cases, weight observer holds a reference to activation's per-IC observer
            w_obs.act_obs = x_obs.ic_obs
            # For all linear ops, set smooth_quant_enabled to true
            # Otherwise the observers just act as normal observers
            x_obs.smooth_quant_enabled = True
            w_obs.smooth_quant_enabled = True
def _maybe_insert_output_observers(
self,
seen_q_op_info: SeenQOpInfo,
root_module: torch.nn.Module,
):
# always add output observer for int8_int8_ops
op_type = seen_q_op_info.type
if op_type in int8_int8_ops:
qconfig = seen_q_op_info.qconfig
for _, tensor_info in enumerate(seen_q_op_info.output_tensor_infos):
if tensor_info is None:
continue
if qconfig is None:
# If qconfig is None, we do not need any input observers
continue
else:
output_tensor_id = tensor_info.id
self.tensor_id_to_observer[
str(output_tensor_id)
] = qconfig.activation()
def insert_observers(self, root_module: torch.nn.Module):
for _, seen_q_op_info in self.idx_to_seen_q_op_infos.items():
self._maybe_insert_input_observers(seen_q_op_info)
self._maybe_insert_output_observers(seen_q_op_info, root_module)
def get_output_observer_from_fqn(self, fqn: str) -> Optional[torch.nn.Module]:
for _, seen_q_op_info in self.idx_to_seen_q_op_infos.items():
if seen_q_op_info.fqn != fqn:
continue
output_tensor_id = seen_q_op_info.output_tensor_infos[0].id
if str(output_tensor_id) in self.tensor_id_to_observer:
return self.tensor_id_to_observer[str(output_tensor_id)]
return None
# This is a hack to enable nn.Sequential to properly work with
# this class.
def forward(self, x):
raise NotImplementedError(
"Calling AutoQuantizationState.forward is not supported"
)
# return x
class AutoQuantizationStateModuleDict(torch.nn.ModuleDict):
    """Container for per-module AutoQuantizationState objects on the root model.

    A distinct subclass (rather than a plain ``ModuleDict``) so that the
    auto-quant bookkeeping container is a recognizable type; presumably used
    for isinstance checks elsewhere — confirm against callers.
    """

    pass
def init_model_quant_state(model, module_id_to_fqn, configure):
    """
    Attach an `AutoQuantizationState` to every non-leaf module of `model`
    and fill `module_id_to_fqn` with id(module) -> global-FQN entries.

    States are registered on the root in `model._fqn_to_auto_quant_state_map`
    (an `AutoQuantizationStateModuleDict`), while each child receives an
    unregistered `_auto_quant_state` attribute pointing at its state.
    """
    # Snapshot the module list first, because new (bookkeeping) modules are
    # added to the model inside the loop below.
    named_modules = list(model.named_modules())

    # Collect leaf modules together with everything nested inside them;
    # those do not get their own quantization state.
    leaves = set()
    for _, child in named_modules:
        if is_leaf(child):
            leaves.update(sub for _, sub in child.named_modules())

    model._fqn_to_auto_quant_state_map = AutoQuantizationStateModuleDict()

    for fqn, mod in named_modules:
        # Associate the instance id with its global FQN ('foo.bar.baz'):
        # for modules this is the module FQN, for functions it is the
        # parent module's FQN.
        module_id_to_fqn[id(mod)] = fqn
        if mod in leaves:
            continue
        auto_quant_state = AutoQuantizationState(fqn, configure)
        # Register the state on the ROOT map so observers live inside the
        # module hierarchy (and therefore serialize), keyed by a sanitized
        # FQN suitable for ModuleDict keys ...
        fqn_to_use_for_key = get_fqn_valid_for_module_dict_key(fqn)
        model._fqn_to_auto_quant_state_map[fqn_to_use_for_key] = auto_quant_state
        # ... but attach it to the child with a raw __setattr__, bypassing
        # torch.nn.Module.__setattr__, so the state does NOT show up among
        # the child's own submodules. This keeps user code that iterates a
        # child's module children unchanged, and confines the dynamically
        # added children to the top-level module.
        object.__setattr__(mod, "_auto_quant_state", auto_quant_state)
| 46,983 | 40.689441 | 126 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.