Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- .gitattributes +1 -0
- videochat2/lib/python3.10/site-packages/torch/ao/quantization/backend_config/_qnnpack_pt2e.py +182 -0
- videochat2/lib/python3.10/site-packages/torch/ao/quantization/backend_config/qnnpack.py +171 -0
- videochat2/lib/python3.10/site-packages/torch/ao/quantization/backend_config/tensorrt.py +98 -0
- videochat2/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/__init__.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/quantizer.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/utils.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/x86_inductor_quantizer.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/xnnpack_quantizer.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/xnnpack_quantizer_utils.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/torch/ao/quantization/quantizer/utils.py +83 -0
- videochat2/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/symbolic_shapes.cpython-310.pyc +3 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/core/CPUAllocator.h +59 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/core/Contiguity.h +129 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/core/DeviceType.h +123 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/core/MemoryFormat.h +290 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/core/QScheme.h +50 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/core/SymBool.h +110 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/core/SymFloat.h +113 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/core/SymInt.h +424 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/core/SymNodeImpl.h +242 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/core/WrapDimMinimal.h +48 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/core/impl/COW.h +32 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/core/impl/GPUTrace.h +28 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/core/impl/SizesAndStrides.h +315 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/util/AbortHandler.h +81 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/util/ApproximateClock.h +115 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/util/BFloat16.h +133 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/util/Bitset.h +116 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/util/ConstexprCrc.h +130 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/util/Exception.h +714 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/util/ExclusivelyOwned.h +140 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/util/ExclusivelyOwnedTensorTraits.h +75 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fn-inl.h +274 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fnuz-inl.h +279 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2.h +148 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2fnuz.h +138 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/util/Gauge.h +48 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/util/Half.h +535 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/util/IdWrapper.h +77 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/util/Lazy.h +120 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/util/LeftRight.h +223 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/util/Logging.h +370 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/util/NetworkFlow.h +54 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/util/ParallelGuard.h +20 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/util/Registry.h +326 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/util/ScopeExit.h +50 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/util/SmallBuffer.h +87 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/util/StringUtil.h +211 -0
- videochat2/lib/python3.10/site-packages/torch/include/c10/util/Synchronized.h +61 -0
.gitattributes
CHANGED
|
@@ -913,3 +913,4 @@ videollama2/lib/python3.10/site-packages/nvidia/nccl/lib/libnccl.so.2 filter=lfs
|
|
| 913 |
videochat2/lib/python3.10/site-packages/torch/_inductor/__pycache__/lowering.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 914 |
videochat2/lib/python3.10/site-packages/torch/_inductor/__pycache__/ir.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 915 |
videochat2/lib/python3.10/site-packages/torch/lib/libc10_cuda.so filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 913 |
videochat2/lib/python3.10/site-packages/torch/_inductor/__pycache__/lowering.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 914 |
videochat2/lib/python3.10/site-packages/torch/_inductor/__pycache__/ir.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 915 |
videochat2/lib/python3.10/site-packages/torch/lib/libc10_cuda.so filter=lfs diff=lfs merge=lfs -text
|
| 916 |
+
videochat2/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/symbolic_shapes.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
videochat2/lib/python3.10/site-packages/torch/ao/quantization/backend_config/_qnnpack_pt2e.py
ADDED
|
@@ -0,0 +1,182 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import operator
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
from torch.ao.quantization.backend_config import (
|
| 6 |
+
BackendConfig,
|
| 7 |
+
BackendPatternConfig,
|
| 8 |
+
DTypeConfig,
|
| 9 |
+
ObservationType,
|
| 10 |
+
)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
weighted_op_quint8_dtype_config = DTypeConfig(
|
| 14 |
+
input_dtype=torch.quint8,
|
| 15 |
+
output_dtype=torch.quint8,
|
| 16 |
+
weight_dtype=torch.qint8,
|
| 17 |
+
bias_dtype=torch.float,
|
| 18 |
+
)
|
| 19 |
+
from typing import List
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def get_linear_configs():
|
| 23 |
+
linear_configs = []
|
| 24 |
+
observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT
|
| 25 |
+
dtype_configs = [weighted_op_quint8_dtype_config]
|
| 26 |
+
|
| 27 |
+
# TODO: need to fix the way we insert observers for this pattern
|
| 28 |
+
# should be solved in the new fusion API
|
| 29 |
+
# reason that this doesn't work: the pattern is a bit complicated and we don't
|
| 30 |
+
# have a way to specify which input of the pattern we would like to observe
|
| 31 |
+
# pattern:
|
| 32 |
+
# bias input weight
|
| 33 |
+
# \ | /
|
| 34 |
+
# \ | t
|
| 35 |
+
# \ | /
|
| 36 |
+
# addmm
|
| 37 |
+
# we want to observe "weight" as weight, but there is not way to convey this
|
| 38 |
+
# information with current pattern language
|
| 39 |
+
#
|
| 40 |
+
# right now:
|
| 41 |
+
# original:
|
| 42 |
+
# weight - t \
|
| 43 |
+
# input - addmm
|
| 44 |
+
# observed (no hack):
|
| 45 |
+
# weight - t - observer \
|
| 46 |
+
# input - observer - addmm
|
| 47 |
+
# target:
|
| 48 |
+
# weight - observer - t \
|
| 49 |
+
# input - observer - addmm
|
| 50 |
+
|
| 51 |
+
# def root_node_getter(node_pattern):
|
| 52 |
+
# addmm, bias, act, weight = node_pattern
|
| 53 |
+
# return addmm
|
| 54 |
+
|
| 55 |
+
# linear_configs.append(
|
| 56 |
+
# BackendPatternConfig((torch.ops.aten.addmm.default, MatchAllNode, MatchAllNode, torch.ops.aten.t.default))
|
| 57 |
+
# .set_observation_type(observation_type) # noqa: E131
|
| 58 |
+
# .set_dtype_configs(dtype_configs)
|
| 59 |
+
# ._set_root_node_getter(root_node_getter))
|
| 60 |
+
|
| 61 |
+
linear_configs.append(
|
| 62 |
+
BackendPatternConfig(torch.ops.aten.addmm.default)
|
| 63 |
+
.set_observation_type(observation_type) # noqa: E131
|
| 64 |
+
.set_dtype_configs(dtype_configs)
|
| 65 |
+
._set_input_type_to_index({"weight": 2, "bias": 0})
|
| 66 |
+
)
|
| 67 |
+
# linear is decomposed to `t - mm` if bias is not present
|
| 68 |
+
linear_configs.append(
|
| 69 |
+
BackendPatternConfig(torch.ops.aten.mm.default)
|
| 70 |
+
.set_observation_type(observation_type) # noqa: E131
|
| 71 |
+
.set_dtype_configs(dtype_configs)
|
| 72 |
+
._set_input_type_to_index({"weight": 1})
|
| 73 |
+
)
|
| 74 |
+
return linear_configs
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def get_conv_configs():
|
| 78 |
+
conv_configs = []
|
| 79 |
+
observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT
|
| 80 |
+
dtype_configs = [weighted_op_quint8_dtype_config]
|
| 81 |
+
conv_configs.append(
|
| 82 |
+
BackendPatternConfig(torch.ops.aten.convolution.default)
|
| 83 |
+
.set_observation_type(observation_type) # noqa: E131
|
| 84 |
+
.set_dtype_configs(dtype_configs)
|
| 85 |
+
._set_input_type_to_index({"weight": 1, "bias": 2})
|
| 86 |
+
)
|
| 87 |
+
conv_configs.append(
|
| 88 |
+
BackendPatternConfig(
|
| 89 |
+
(torch.ops.aten.convolution.default, torch.ops.aten.relu.default)
|
| 90 |
+
)
|
| 91 |
+
.set_observation_type(observation_type) # noqa: E131
|
| 92 |
+
.set_dtype_configs(dtype_configs)
|
| 93 |
+
._set_input_type_to_index({"weight": 1, "bias": 2})
|
| 94 |
+
)
|
| 95 |
+
# TODO: remove when functionalization is supported in PT2 mode
|
| 96 |
+
conv_configs.append(
|
| 97 |
+
BackendPatternConfig(
|
| 98 |
+
(torch.ops.aten.convolution.default, torch.ops.aten.relu_.default)
|
| 99 |
+
)
|
| 100 |
+
.set_observation_type(observation_type) # noqa: E131
|
| 101 |
+
.set_dtype_configs(dtype_configs)
|
| 102 |
+
._set_input_type_to_index({"weight": 1, "bias": 2})
|
| 103 |
+
)
|
| 104 |
+
return conv_configs
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def get_pooling_configs():
|
| 108 |
+
backend_pattern_configs = []
|
| 109 |
+
observation_type = ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT
|
| 110 |
+
dtype_configs = [weighted_op_quint8_dtype_config]
|
| 111 |
+
|
| 112 |
+
def root_node_getter(node_pattern):
|
| 113 |
+
getitem, maxpool, index = node_pattern
|
| 114 |
+
return maxpool
|
| 115 |
+
|
| 116 |
+
backend_pattern_configs.append(
|
| 117 |
+
BackendPatternConfig()
|
| 118 |
+
._set_pattern_complex_format(
|
| 119 |
+
(operator.getitem, torch.ops.aten.max_pool2d_with_indices.default, 0)
|
| 120 |
+
)
|
| 121 |
+
.set_observation_type(observation_type) # noqa: E131
|
| 122 |
+
.set_dtype_configs(dtype_configs)
|
| 123 |
+
._set_root_node_getter(root_node_getter)
|
| 124 |
+
)
|
| 125 |
+
|
| 126 |
+
return backend_pattern_configs
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
def get_relu_configs():
|
| 130 |
+
backend_pattern_configs = []
|
| 131 |
+
observation_type = ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT
|
| 132 |
+
dtype_configs = [weighted_op_quint8_dtype_config]
|
| 133 |
+
backend_pattern_configs.append(
|
| 134 |
+
BackendPatternConfig(torch.ops.aten.relu.default)
|
| 135 |
+
.set_observation_type(observation_type) # noqa: E131
|
| 136 |
+
.set_dtype_configs(dtype_configs)
|
| 137 |
+
)
|
| 138 |
+
return backend_pattern_configs
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
def get_binary_op_configs():
|
| 142 |
+
binary_op_configs: List[BackendPatternConfig] = []
|
| 143 |
+
dtype_configs = [weighted_op_quint8_dtype_config]
|
| 144 |
+
num_tensor_args_to_observation_type_mapping = {
|
| 145 |
+
# TODO: this is not used right now since we have extra check in prepare
|
| 146 |
+
# will need to change this to NO_OBSERVER later after we implemented
|
| 147 |
+
# Tensor dtype inference properly
|
| 148 |
+
0: ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT,
|
| 149 |
+
1: ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT,
|
| 150 |
+
2: ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT,
|
| 151 |
+
}
|
| 152 |
+
for op_with_quantized_bop_scalar_variant in [
|
| 153 |
+
torch.ops.aten.add.Tensor,
|
| 154 |
+
torch.ops.aten.add_.Tensor,
|
| 155 |
+
]:
|
| 156 |
+
bop_patterns = [
|
| 157 |
+
(op_with_quantized_bop_scalar_variant, torch.ops.aten.relu.default),
|
| 158 |
+
op_with_quantized_bop_scalar_variant,
|
| 159 |
+
# TODO: remove when functionalization is supported in pt2_mode
|
| 160 |
+
(op_with_quantized_bop_scalar_variant, torch.ops.aten.relu_.default),
|
| 161 |
+
]
|
| 162 |
+
for bop_pattern in bop_patterns:
|
| 163 |
+
binary_op_configs.append(
|
| 164 |
+
BackendPatternConfig(bop_pattern)
|
| 165 |
+
.set_dtype_configs(dtype_configs) # noqa: E131
|
| 166 |
+
._set_num_tensor_args_to_observation_type(
|
| 167 |
+
num_tensor_args_to_observation_type_mapping
|
| 168 |
+
)
|
| 169 |
+
)
|
| 170 |
+
|
| 171 |
+
return binary_op_configs
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
def get_qnnpack_pt2e_backend_config():
|
| 175 |
+
return (
|
| 176 |
+
BackendConfig("qnnpack_pytorch_2.0_export")
|
| 177 |
+
.set_backend_pattern_configs(get_linear_configs())
|
| 178 |
+
.set_backend_pattern_configs(get_binary_op_configs())
|
| 179 |
+
.set_backend_pattern_configs(get_conv_configs())
|
| 180 |
+
.set_backend_pattern_configs(get_pooling_configs())
|
| 181 |
+
.set_backend_pattern_configs(get_relu_configs())
|
| 182 |
+
)
|
videochat2/lib/python3.10/site-packages/torch/ao/quantization/backend_config/qnnpack.py
ADDED
|
@@ -0,0 +1,171 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
|
| 3 |
+
from ._common_operator_config_utils import (
|
| 4 |
+
_get_binary_op_configs,
|
| 5 |
+
_get_bn_configs,
|
| 6 |
+
_get_cat_config,
|
| 7 |
+
_get_conv_configs,
|
| 8 |
+
_get_default_op_configs,
|
| 9 |
+
_get_embedding_op_configs,
|
| 10 |
+
_get_fixed_qparams_op_configs,
|
| 11 |
+
_get_linear_configs,
|
| 12 |
+
_get_rnn_op_configs,
|
| 13 |
+
_get_share_qparams_op_configs,
|
| 14 |
+
)
|
| 15 |
+
from .backend_config import BackendConfig, DTypeConfig, DTypeWithConstraints
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
__all__ = [
|
| 19 |
+
"get_qnnpack_backend_config",
|
| 20 |
+
]
|
| 21 |
+
|
| 22 |
+
# ===================
|
| 23 |
+
# | DTYPE CONFIGS |
|
| 24 |
+
# ===================
|
| 25 |
+
|
| 26 |
+
qnnpack_weighted_op_quint8_dtype_config = DTypeConfig(
|
| 27 |
+
input_dtype=torch.quint8,
|
| 28 |
+
output_dtype=torch.quint8,
|
| 29 |
+
weight_dtype=torch.qint8,
|
| 30 |
+
bias_dtype=torch.float,
|
| 31 |
+
)
|
| 32 |
+
|
| 33 |
+
qnnpack_default_op_quint8_dtype_config = DTypeConfig(
|
| 34 |
+
input_dtype=torch.quint8,
|
| 35 |
+
output_dtype=torch.quint8,
|
| 36 |
+
)
|
| 37 |
+
|
| 38 |
+
qnnpack_default_op_fp16_dtype_config = DTypeConfig(
|
| 39 |
+
input_dtype=torch.float16,
|
| 40 |
+
output_dtype=torch.float16,
|
| 41 |
+
weight_dtype=torch.float16,
|
| 42 |
+
bias_dtype=torch.float16,
|
| 43 |
+
)
|
| 44 |
+
|
| 45 |
+
qnnpack_default_dynamic_int8_dtype_config = DTypeConfig(
|
| 46 |
+
input_dtype=torch.quint8,
|
| 47 |
+
output_dtype=torch.float,
|
| 48 |
+
weight_dtype=torch.qint8,
|
| 49 |
+
bias_dtype=torch.float,
|
| 50 |
+
is_dynamic=True,
|
| 51 |
+
)
|
| 52 |
+
|
| 53 |
+
qnnpack_default_dynamic_float16_dtype_config = DTypeConfig(
|
| 54 |
+
input_dtype=torch.float16,
|
| 55 |
+
output_dtype=torch.float,
|
| 56 |
+
weight_dtype=torch.float16,
|
| 57 |
+
bias_dtype=torch.float,
|
| 58 |
+
is_dynamic=True,
|
| 59 |
+
)
|
| 60 |
+
|
| 61 |
+
qnnpack_weight_only_quint8_dtype_config = DTypeConfig(
|
| 62 |
+
input_dtype=torch.float,
|
| 63 |
+
output_dtype=torch.float,
|
| 64 |
+
weight_dtype=torch.quint8,
|
| 65 |
+
)
|
| 66 |
+
|
| 67 |
+
qnnpack_weight_only_quint4x2_dtype_config = DTypeConfig(
|
| 68 |
+
input_dtype=torch.float,
|
| 69 |
+
output_dtype=torch.float,
|
| 70 |
+
weight_dtype=torch.quint4x2,
|
| 71 |
+
)
|
| 72 |
+
|
| 73 |
+
# xnnpack compatible dtype configs
|
| 74 |
+
|
| 75 |
+
# We restrict scale values to be 2 ** -12 to ensure the
|
| 76 |
+
# requantization scale never falls below the xnnpack lower
|
| 77 |
+
# threshold. Additionally, for qint8 weight, we restrict
|
| 78 |
+
# the quantization values to [-127, +127], excluding -128.
|
| 79 |
+
# For more detail, refer to the description of
|
| 80 |
+
# `default_symmetric_qnnpack_qconfig`.
|
| 81 |
+
|
| 82 |
+
# TODO: add additional restriction on qscheme to ensure it
|
| 83 |
+
# is either per_tensor_symmetric or per_channel_symmetric
|
| 84 |
+
|
| 85 |
+
qnnpack_act_qint8_scale_min_2_neg_12 = DTypeWithConstraints(
|
| 86 |
+
dtype=torch.qint8,
|
| 87 |
+
scale_min_lower_bound=2**-12,
|
| 88 |
+
)
|
| 89 |
+
|
| 90 |
+
qnnpack_weight_qint8_neg_127_to_127_scale_min_2_neg_12 = DTypeWithConstraints(
|
| 91 |
+
dtype=torch.qint8,
|
| 92 |
+
quant_min_lower_bound=-127,
|
| 93 |
+
quant_max_upper_bound=127,
|
| 94 |
+
scale_min_lower_bound=2**-12,
|
| 95 |
+
)
|
| 96 |
+
|
| 97 |
+
qnnpack_weighted_op_qint8_symmetric_dtype_config = DTypeConfig(
|
| 98 |
+
input_dtype=qnnpack_act_qint8_scale_min_2_neg_12,
|
| 99 |
+
output_dtype=qnnpack_act_qint8_scale_min_2_neg_12,
|
| 100 |
+
weight_dtype=qnnpack_weight_qint8_neg_127_to_127_scale_min_2_neg_12,
|
| 101 |
+
bias_dtype=torch.float,
|
| 102 |
+
)
|
| 103 |
+
|
| 104 |
+
qnnpack_default_op_qint8_symmetric_dtype_config = DTypeConfig(
|
| 105 |
+
input_dtype=qnnpack_act_qint8_scale_min_2_neg_12,
|
| 106 |
+
output_dtype=qnnpack_act_qint8_scale_min_2_neg_12,
|
| 107 |
+
)
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
# =====================
|
| 111 |
+
# | BACKEND CONFIGS |
|
| 112 |
+
# =====================
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def get_qnnpack_backend_config() -> BackendConfig:
|
| 116 |
+
"""
|
| 117 |
+
Return the `BackendConfig` for PyTorch's native QNNPACK backend.
|
| 118 |
+
"""
|
| 119 |
+
conv_dtype_configs = [
|
| 120 |
+
qnnpack_weighted_op_qint8_symmetric_dtype_config,
|
| 121 |
+
qnnpack_weighted_op_quint8_dtype_config,
|
| 122 |
+
]
|
| 123 |
+
linear_dtype_configs = [
|
| 124 |
+
qnnpack_weighted_op_qint8_symmetric_dtype_config,
|
| 125 |
+
qnnpack_weighted_op_quint8_dtype_config,
|
| 126 |
+
qnnpack_default_dynamic_int8_dtype_config,
|
| 127 |
+
qnnpack_default_dynamic_float16_dtype_config,
|
| 128 |
+
]
|
| 129 |
+
binary_op_dtype_configs = [
|
| 130 |
+
qnnpack_default_op_qint8_symmetric_dtype_config,
|
| 131 |
+
qnnpack_default_op_quint8_dtype_config,
|
| 132 |
+
]
|
| 133 |
+
default_op_dtype_configs = [
|
| 134 |
+
qnnpack_default_op_qint8_symmetric_dtype_config,
|
| 135 |
+
qnnpack_default_op_quint8_dtype_config,
|
| 136 |
+
]
|
| 137 |
+
fixed_qparams_op_dtype_configs = [
|
| 138 |
+
qnnpack_default_op_qint8_symmetric_dtype_config,
|
| 139 |
+
qnnpack_default_op_quint8_dtype_config,
|
| 140 |
+
]
|
| 141 |
+
share_qparams_op_dtype_configs = [
|
| 142 |
+
qnnpack_default_op_qint8_symmetric_dtype_config,
|
| 143 |
+
qnnpack_default_op_quint8_dtype_config,
|
| 144 |
+
]
|
| 145 |
+
rnn_op_dtype_configs = [
|
| 146 |
+
qnnpack_default_dynamic_int8_dtype_config,
|
| 147 |
+
qnnpack_default_dynamic_float16_dtype_config,
|
| 148 |
+
]
|
| 149 |
+
embedding_op_dtype_configs = [
|
| 150 |
+
qnnpack_weight_only_quint8_dtype_config,
|
| 151 |
+
qnnpack_weight_only_quint4x2_dtype_config,
|
| 152 |
+
]
|
| 153 |
+
return (
|
| 154 |
+
BackendConfig("qnnpack")
|
| 155 |
+
.set_backend_pattern_configs(_get_conv_configs(conv_dtype_configs))
|
| 156 |
+
.set_backend_pattern_configs(_get_linear_configs(linear_dtype_configs))
|
| 157 |
+
.set_backend_pattern_configs(_get_binary_op_configs(binary_op_dtype_configs))
|
| 158 |
+
.set_backend_pattern_config(_get_cat_config(default_op_dtype_configs))
|
| 159 |
+
.set_backend_pattern_configs(_get_default_op_configs(default_op_dtype_configs))
|
| 160 |
+
.set_backend_pattern_configs(
|
| 161 |
+
_get_fixed_qparams_op_configs(fixed_qparams_op_dtype_configs)
|
| 162 |
+
)
|
| 163 |
+
.set_backend_pattern_configs(
|
| 164 |
+
_get_share_qparams_op_configs(share_qparams_op_dtype_configs)
|
| 165 |
+
)
|
| 166 |
+
.set_backend_pattern_configs(_get_bn_configs(default_op_dtype_configs))
|
| 167 |
+
.set_backend_pattern_configs(_get_rnn_op_configs(rnn_op_dtype_configs))
|
| 168 |
+
.set_backend_pattern_configs(
|
| 169 |
+
_get_embedding_op_configs(embedding_op_dtype_configs)
|
| 170 |
+
)
|
| 171 |
+
)
|
videochat2/lib/python3.10/site-packages/torch/ao/quantization/backend_config/tensorrt.py
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import torch
|
| 3 |
+
|
| 4 |
+
from ._common_operator_config_utils import (
|
| 5 |
+
_get_binary_op_configs,
|
| 6 |
+
_get_conv_configs,
|
| 7 |
+
_get_linear_configs,
|
| 8 |
+
_get_share_qparams_op_configs,
|
| 9 |
+
_get_tensor_info_op_configs,
|
| 10 |
+
)
|
| 11 |
+
from .backend_config import (
|
| 12 |
+
BackendConfig,
|
| 13 |
+
BackendPatternConfig,
|
| 14 |
+
DTypeConfig,
|
| 15 |
+
ObservationType,
|
| 16 |
+
)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
__all__ = [
|
| 20 |
+
"get_tensorrt_backend_config",
|
| 21 |
+
"get_tensorrt_backend_config_dict",
|
| 22 |
+
]
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def get_tensorrt_backend_config() -> BackendConfig:
|
| 26 |
+
"""
|
| 27 |
+
Return the `BackendConfig` for the TensorRT backend.
|
| 28 |
+
NOTE: Current api will change in the future, it's just to unblock experimentation for
|
| 29 |
+
new backends, please don't use it right now.
|
| 30 |
+
TODO: add a README when it's more stable
|
| 31 |
+
"""
|
| 32 |
+
# dtype configs
|
| 33 |
+
weighted_op_qint8_dtype_config = DTypeConfig(
|
| 34 |
+
input_dtype=torch.qint8,
|
| 35 |
+
output_dtype=torch.qint8,
|
| 36 |
+
weight_dtype=torch.qint8,
|
| 37 |
+
bias_dtype=torch.float,
|
| 38 |
+
)
|
| 39 |
+
non_weighted_op_qint8_dtype_config = DTypeConfig(
|
| 40 |
+
input_dtype=torch.qint8,
|
| 41 |
+
output_dtype=torch.qint8,
|
| 42 |
+
)
|
| 43 |
+
|
| 44 |
+
addmm_config = (
|
| 45 |
+
BackendPatternConfig(torch.addmm)
|
| 46 |
+
.set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT)
|
| 47 |
+
.add_dtype_config(weighted_op_qint8_dtype_config)
|
| 48 |
+
._set_input_type_to_index(
|
| 49 |
+
{
|
| 50 |
+
"bias": 0,
|
| 51 |
+
"input": 1,
|
| 52 |
+
"weight": 2,
|
| 53 |
+
}
|
| 54 |
+
)
|
| 55 |
+
)
|
| 56 |
+
cat_config = (
|
| 57 |
+
BackendPatternConfig(torch.cat)
|
| 58 |
+
.set_observation_type(ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT)
|
| 59 |
+
.add_dtype_config(non_weighted_op_qint8_dtype_config)
|
| 60 |
+
)
|
| 61 |
+
conv_dtype_configs = [
|
| 62 |
+
weighted_op_qint8_dtype_config,
|
| 63 |
+
]
|
| 64 |
+
linear_dtype_configs = [
|
| 65 |
+
weighted_op_qint8_dtype_config,
|
| 66 |
+
]
|
| 67 |
+
binary_op_dtype_configs = [
|
| 68 |
+
weighted_op_qint8_dtype_config,
|
| 69 |
+
]
|
| 70 |
+
share_qparams_op_dtype_configs = [
|
| 71 |
+
non_weighted_op_qint8_dtype_config,
|
| 72 |
+
]
|
| 73 |
+
tensor_info_op_dtype_configs = [
|
| 74 |
+
non_weighted_op_qint8_dtype_config,
|
| 75 |
+
]
|
| 76 |
+
# there might be things not supported in fx2trt, but it will error out
|
| 77 |
+
# during fx2trt conversion and can support them after that
|
| 78 |
+
return (
|
| 79 |
+
BackendConfig("tensorrt")
|
| 80 |
+
.set_backend_pattern_configs(_get_conv_configs(conv_dtype_configs))
|
| 81 |
+
.set_backend_pattern_config(addmm_config)
|
| 82 |
+
.set_backend_pattern_config(cat_config)
|
| 83 |
+
.set_backend_pattern_configs(_get_linear_configs(linear_dtype_configs))
|
| 84 |
+
.set_backend_pattern_configs(_get_binary_op_configs(binary_op_dtype_configs))
|
| 85 |
+
.set_backend_pattern_configs(
|
| 86 |
+
_get_share_qparams_op_configs(share_qparams_op_dtype_configs)
|
| 87 |
+
)
|
| 88 |
+
.set_backend_pattern_configs(
|
| 89 |
+
_get_tensor_info_op_configs(tensor_info_op_dtype_configs)
|
| 90 |
+
)
|
| 91 |
+
)
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def get_tensorrt_backend_config_dict():
|
| 95 |
+
"""
|
| 96 |
+
Return the `BackendConfig` for the TensorRT backend in dictionary form.
|
| 97 |
+
"""
|
| 98 |
+
return get_tensorrt_backend_config().to_dict()
|
videochat2/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (509 Bytes). View file
|
|
|
videochat2/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/quantizer.cpython-310.pyc
ADDED
|
Binary file (5.09 kB). View file
|
|
|
videochat2/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/utils.cpython-310.pyc
ADDED
|
Binary file (3.4 kB). View file
|
|
|
videochat2/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/x86_inductor_quantizer.cpython-310.pyc
ADDED
|
Binary file (37.6 kB). View file
|
|
|
videochat2/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/xnnpack_quantizer.cpython-310.pyc
ADDED
|
Binary file (12.1 kB). View file
|
|
|
videochat2/lib/python3.10/site-packages/torch/ao/quantization/quantizer/__pycache__/xnnpack_quantizer_utils.cpython-310.pyc
ADDED
|
Binary file (21.4 kB). View file
|
|
|
videochat2/lib/python3.10/site-packages/torch/ao/quantization/quantizer/utils.py
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
from typing import List
|
| 3 |
+
|
| 4 |
+
from torch.ao.quantization.pt2e.utils import _is_sym_size_node
|
| 5 |
+
from torch.ao.quantization.quantizer.quantizer import QuantizationAnnotation
|
| 6 |
+
from torch.fx import Node
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def _annotate_input_qspec_map(node: Node, input_node: Node, qspec):
|
| 10 |
+
quantization_annotation = node.meta.get(
|
| 11 |
+
"quantization_annotation", QuantizationAnnotation()
|
| 12 |
+
)
|
| 13 |
+
if quantization_annotation.input_qspec_map is None:
|
| 14 |
+
quantization_annotation.input_qspec_map = {}
|
| 15 |
+
quantization_annotation.input_qspec_map[input_node] = qspec
|
| 16 |
+
node.meta["quantization_annotation"] = quantization_annotation
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def _annotate_output_qspec(node: Node, qspec):
|
| 20 |
+
quantization_annotation = node.meta.get(
|
| 21 |
+
"quantization_annotation", QuantizationAnnotation()
|
| 22 |
+
)
|
| 23 |
+
quantization_annotation.output_qspec = qspec
|
| 24 |
+
node.meta["quantization_annotation"] = quantization_annotation
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def _node_only_used_for_sym_size(node: Node, partition_nodes: List[Node]):
|
| 28 |
+
"""
|
| 29 |
+
This utility is used to handle cases when dynami_shape=True tracing leads
|
| 30 |
+
to symint nodes in the pattern of linear module. In those cases, we need to
|
| 31 |
+
distinguish between the nodes that are in input for just extracting value of
|
| 32 |
+
some dimentions (and symint nodes) vs. the one that is activation.
|
| 33 |
+
For example:
|
| 34 |
+
graph(x, y, weight):
|
| 35 |
+
size_0 = torch.ops.aten.sym_size([x], [0])
|
| 36 |
+
size_1 = torch.ops.aten.sym_size([y], [1])
|
| 37 |
+
view_size = size_0 * size_1
|
| 38 |
+
size_3 = torch.ops.aten.sym_size([x], [2])
|
| 39 |
+
vie_out = torch.ops.aten.view(x, [view_size, size_3])
|
| 40 |
+
return mm(view_out, weight)
|
| 41 |
+
In the example above y node is not actual input. It exist only to extract size_1
|
| 42 |
+
"""
|
| 43 |
+
if _is_sym_size_node(node):
|
| 44 |
+
return True
|
| 45 |
+
|
| 46 |
+
return all(
|
| 47 |
+
((user not in partition_nodes) or _is_sym_size_node(user))
|
| 48 |
+
for user in node.users
|
| 49 |
+
)
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def _get_module_name_filter(module_name: str):
|
| 53 |
+
"""Get the module_name_filter function for a given module name, the filter accepts
|
| 54 |
+
a node and checks if the node comes from a module that has certain module name
|
| 55 |
+
|
| 56 |
+
For example:
|
| 57 |
+
node: linear_op = call_function[...](...) # comes from a module with name blocks.sub.linear1
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
>> module_name_filter = _get_module_name_filter("blocks.sub")
|
| 61 |
+
>> print(module_name_filter(node))
|
| 62 |
+
True # the node is from "blocks.sub" based on the fully qualified name "blocks.sub.linear1"
|
| 63 |
+
"""
|
| 64 |
+
|
| 65 |
+
def module_name_filter(n: Node) -> bool:
|
| 66 |
+
# example: {
|
| 67 |
+
# 'L__self___sub': ("L['self'].sub", <class '....Sub'>),
|
| 68 |
+
# 'L__self___sub_linear': ("L['self'].sub.linear", <class 'torch.nn.modules.linear.Linear'>)
|
| 69 |
+
# }
|
| 70 |
+
# get_attr nodes doesn't have nn_module_stack?
|
| 71 |
+
nn_module_stack = n.meta.get("nn_module_stack", {})
|
| 72 |
+
|
| 73 |
+
def _normalize_path(n):
|
| 74 |
+
prefix = 0
|
| 75 |
+
# TODO This is non standard behavior and should be removed when we migrate off capture_pre_autograd_graph.
|
| 76 |
+
if n.startswith("L['self']."):
|
| 77 |
+
prefix = len("L['self'].")
|
| 78 |
+
return n[prefix:]
|
| 79 |
+
|
| 80 |
+
names = [_normalize_path(n) for n, _ in nn_module_stack.values()]
|
| 81 |
+
return module_name in names
|
| 82 |
+
|
| 83 |
+
return module_name_filter
|
videochat2/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/symbolic_shapes.cpython-310.pyc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:67258c30b54aed4ef977190f56b943682ffcecf9ed58de2c35e8800037664930
|
| 3 |
+
size 144048
|
videochat2/lib/python3.10/site-packages/torch/include/c10/core/CPUAllocator.h
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <cstdint>
|
| 4 |
+
#include <cstring>
|
| 5 |
+
#include <mutex>
|
| 6 |
+
#include <unordered_map>
|
| 7 |
+
|
| 8 |
+
#include <c10/core/Allocator.h>
|
| 9 |
+
#include <c10/macros/Export.h>
|
| 10 |
+
#include <c10/util/Flags.h>
|
| 11 |
+
|
| 12 |
+
// TODO: rename to c10
|
| 13 |
+
C10_DECLARE_bool(caffe2_report_cpu_memory_usage);
|
| 14 |
+
|
| 15 |
+
namespace c10 {
|
| 16 |
+
|
| 17 |
+
using MemoryDeleter = void (*)(void*);
|
| 18 |
+
|
| 19 |
+
// A helper function that is basically doing nothing.
|
| 20 |
+
C10_API void NoDelete(void*);
|
| 21 |
+
|
| 22 |
+
// A simple struct that is used to report C10's memory allocation,
|
| 23 |
+
// deallocation status and out-of-memory events to the profiler
|
| 24 |
+
class C10_API ProfiledCPUMemoryReporter {
 public:
  ProfiledCPUMemoryReporter() = default;
  // Records an allocation of `nbytes` bytes at `ptr`.
  void New(void* ptr, size_t nbytes);
  // Records a failed attempt to allocate `nbytes` bytes (out-of-memory event).
  void OutOfMemory(size_t nbytes);
  // Records the deallocation of a pointer previously passed to New().
  void Delete(void* ptr);

 private:
  // Protects all of the bookkeeping state below.
  std::mutex mutex_;
  // Size of each live allocation keyed by pointer, so Delete() can tell how
  // many bytes were released.
  std::unordered_map<void*, size_t> size_table_;
  // Currently outstanding bytes (per the name; maintained in the .cpp —
  // confirm against CPUAllocator.cpp).
  size_t allocated_ = 0;
  // Number of reports made so far — presumably used to throttle logging;
  // verify against the implementation.
  size_t log_cnt_ = 0;
};
|
| 37 |
+
|
| 38 |
+
C10_API ProfiledCPUMemoryReporter& profiledCPUMemoryReporter();
|
| 39 |
+
|
| 40 |
+
// Get the CPU Allocator.
|
| 41 |
+
C10_API at::Allocator* GetCPUAllocator();
|
| 42 |
+
// Sets the CPU allocator to the given allocator: the caller gives away the
|
| 43 |
+
// ownership of the pointer.
|
| 44 |
+
C10_API void SetCPUAllocator(at::Allocator* alloc, uint8_t priority = 0);
|
| 45 |
+
|
| 46 |
+
// Get the Default CPU Allocator
|
| 47 |
+
C10_API at::Allocator* GetDefaultCPUAllocator();
|
| 48 |
+
|
| 49 |
+
// Get the Default Mobile CPU Allocator
|
| 50 |
+
C10_API at::Allocator* GetDefaultMobileCPUAllocator();
|
| 51 |
+
|
| 52 |
+
// The CPUCachingAllocator is experimental and might disappear in the future.
|
| 53 |
+
// The only place that uses it is in StaticRuntime.
|
| 54 |
+
// Set the CPU Caching Allocator
|
| 55 |
+
C10_API void SetCPUCachingAllocator(Allocator* alloc, uint8_t priority = 0);
|
| 56 |
+
// Get the CPU Caching Allocator
|
| 57 |
+
C10_API Allocator* GetCPUCachingAllocator();
|
| 58 |
+
|
| 59 |
+
} // namespace c10
|
videochat2/lib/python3.10/site-packages/torch/include/c10/core/Contiguity.h
ADDED
|
@@ -0,0 +1,129 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <c10/core/SymBool.h>
|
| 3 |
+
#include <c10/core/SymInt.h>
|
| 4 |
+
#include <c10/util/ArrayRef.h>
|
| 5 |
+
#include <c10/util/SmallVector.h>
|
| 6 |
+
#include <c10/util/irange.h>
|
| 7 |
+
|
| 8 |
+
#include <algorithm>
|
| 9 |
+
#include <cstdint>
|
| 10 |
+
|
| 11 |
+
namespace c10 {
|
| 12 |
+
|
| 13 |
+
// Returns true iff a tensor with the given `sizes`/`strides` is contiguous in
// the default (row-major) sense. T is an integer-like type (presumably
// int64_t or SymInt, given the SymInt.h include). Size-oblivious guards are
// used so unbacked symbolic sizes do not force guards on concrete values.
template <typename T>
bool _compute_contiguous(ArrayRef<T> sizes, ArrayRef<T> strides, T numel) {
  bool is_contiguous = true;
  // An empty tensor is trivially contiguous regardless of its strides.
  if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_eq(numel, 0))) {
    return is_contiguous;
  }
  // Expected stride of the dimension currently being checked, innermost first.
  T z = 1;
  // NB: make sure we do signed arithmetic
  for (int64_t d = int64_t(sizes.size()) - 1; d >= 0; d--) {
    const auto& size_d = sizes[d];
    // Size-1 dimensions place no constraint on their stride.
    if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_ne(size_d, 1))) {
      if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_eq(strides[d], z))) {
        z *= size_d;
      } else {
        is_contiguous = false;
        break;
      }
    }
  }
  return is_contiguous;
}
|
| 34 |
+
|
| 35 |
+
// Returns true iff `sizes`/`strides` describe a tensor contiguous in
// channels-last (NHWC) order. Only rank 4 is currently supported; rank 3 is
// reserved (TODO below) and all other ranks return false.
template <typename T>
bool _compute_channels_last_contiguous_2d(
    ArrayRef<T> sizes,
    ArrayRef<T> strides) {
  // Please don't combine these code, constant array is used here to let
  // compiler fully unroll the loop to get better performance
  switch (sizes.size()) {
    case 4: {
      T expected = 1;
      // Walk dims from innermost to outermost in NHWC order: C, W, H, N.
      for (auto& d : {1, 3, 2, 0}) {
        const auto& size_d = sizes[d];
        // Size-1 dimensions place no constraint on their stride.
        if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_ne(size_d, 1))) {
          if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_ne(strides[d], expected))) {
            return false;
          }
          expected *= size_d;
        }
      }
      return true;
    }
    // NOLINTNEXTLINE(bugprone-branch-clone)
    case 3:
      // TODO dim == 3 case will be enabled once it is fully tested
      return false;
    default:
      return false;
  }
}
|
| 63 |
+
|
| 64 |
+
// Returns true iff `sizes`/`strides` describe a tensor contiguous in 3d
// channels-last (NDHWC) order. Only rank 5 is currently supported; rank 4 is
// reserved (TODO below) and all other ranks return false.
template <typename T>
bool _compute_channels_last_contiguous_3d(
    ArrayRef<T> sizes,
    ArrayRef<T> strides) {
  // Please don't combine these code, constant array is used here to let
  // compiler fully unroll the loop to get better performance
  switch (sizes.size()) {
    case 5: {
      T expected = 1;
      // Walk dims from innermost to outermost in NDHWC order: C, W, H, D, N.
      for (auto& d : {1, 4, 3, 2, 0}) {
        const auto& size_d = sizes[d];
        // Size-1 dimensions place no constraint on their stride.
        if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_ne(size_d, 1))) {
          if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_ne(strides[d], expected))) {
            return false;
          }
          expected *= size_d;
        }
      }
      return true;
    }
    // NOLINTNEXTLINE(bugprone-branch-clone)
    case 4:
      // TODO dim == 4 case will be enabled once it is fully tested
      return false;
    default:
      return false;
  }
}
|
| 92 |
+
|
| 93 |
+
// Returns true iff the elements described by `sizes`/`strides` are "dense":
// some permutation of the dimensions yields a contiguous layout, so no two
// index tuples alias the same element and there are no gaps.
template <typename T>
bool _compute_non_overlapping_and_dense(
    ArrayRef<T> sizes,
    ArrayRef<T> strides) {
  auto dim = sizes.size();
  // Fast path for 1d: dense iff fewer than two elements or unit stride.
  if (dim == 1) {
    return sizes[0] < 2 || strides[0] == 1;
  }
  SmallVector<int64_t, 5> perm;
  perm.resize(dim);
  for (const auto i : c10::irange(dim)) {
    perm[i] = i;
  }
  // Sort by strides, leaving 0 and 1 sized dims at the end of the array
  std::sort(perm.begin(), perm.end(), [&](int64_t a, int64_t b) {
    if (sizes[a] < 2) {
      return false;
    } else if (sizes[b] < 2) {
      return true;
    }
    return strides[a] < strides[b];
  });
  // Walk dims from smallest stride to largest; each one must start exactly
  // where the previous dimension's extent ended, as a contiguous layout would.
  T require_stride = 1;
  for (const auto i : c10::irange(dim)) {
    const auto& size_perm_i = sizes[perm[i]];
    // Trivial (size < 2) dims were sorted to the end; once we reach them the
    // remaining dims impose no constraint, so the layout is dense.
    if (size_perm_i < 2) {
      return true;
    }
    if (strides[perm[i]] != require_stride) {
      return false;
    }
    require_stride *= size_perm_i;
  }
  return true;
}
|
| 128 |
+
|
| 129 |
+
} // namespace c10
|
videochat2/lib/python3.10/site-packages/torch/include/c10/core/DeviceType.h
ADDED
|
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// This is directly synchronized with caffe2/proto/caffe2.proto, but
|
| 4 |
+
// doesn't require me to figure out how to get Protobuf headers into
|
| 5 |
+
// ATen/core (which would require a lot more build system hacking.)
|
| 6 |
+
// If you modify me, keep me synchronized with that file.
|
| 7 |
+
|
| 8 |
+
#include <c10/macros/Export.h>
|
| 9 |
+
|
| 10 |
+
#include <cstddef>
|
| 11 |
+
#include <cstdint>
|
| 12 |
+
#include <functional>
|
| 13 |
+
#include <ostream>
|
| 14 |
+
#include <string>
|
| 15 |
+
|
| 16 |
+
namespace c10 {
|
| 17 |
+
|
| 18 |
+
// These contains all device types that also have a BackendComponent
|
| 19 |
+
// and therefore participate in per-backend functionality dispatch keys.
|
| 20 |
+
// This is most backends except PrivateUse2 and PrivateUse3
|
| 21 |
+
#define C10_FORALL_BACKEND_DEVICE_TYPES(_, extra) \
|
| 22 |
+
_(CPU, extra) \
|
| 23 |
+
_(CUDA, extra) \
|
| 24 |
+
_(HIP, extra) \
|
| 25 |
+
_(XLA, extra) \
|
| 26 |
+
_(MPS, extra) \
|
| 27 |
+
_(IPU, extra) \
|
| 28 |
+
_(XPU, extra) \
|
| 29 |
+
_(HPU, extra) \
|
| 30 |
+
_(VE, extra) \
|
| 31 |
+
_(Lazy, extra) \
|
| 32 |
+
_(Meta, extra) \
|
| 33 |
+
_(MTIA, extra) \
|
| 34 |
+
_(PrivateUse1, extra)
|
| 35 |
+
|
| 36 |
+
// Numeric values are serialized identifiers synchronized with
// caffe2/proto/caffe2.proto (see file header); do not renumber existing
// entries.
enum class DeviceType : int8_t {
  CPU = 0,
  CUDA = 1, // CUDA.
  MKLDNN = 2, // Reserved for explicit MKLDNN
  OPENGL = 3, // OpenGL
  OPENCL = 4, // OpenCL
  IDEEP = 5, // IDEEP.
  HIP = 6, // AMD HIP
  FPGA = 7, // FPGA
  MAIA = 8, // ONNX Runtime / Microsoft
  XLA = 9, // XLA / TPU
  Vulkan = 10, // Vulkan
  Metal = 11, // Metal
  XPU = 12, // XPU
  MPS = 13, // MPS
  Meta = 14, // Meta (tensors with no data)
  HPU = 15, // HPU / HABANA
  VE = 16, // SX-Aurora / NEC
  Lazy = 17, // Lazy Tensors
  IPU = 18, // Graphcore IPU
  MTIA = 19, // Meta training and inference devices
  PrivateUse1 = 20, // PrivateUse1 device
  // NB: If you add more devices:
  //  - Change the implementations of DeviceTypeName and isValidDeviceType
  //    in DeviceType.cpp
  //  - Change the number below
  COMPILE_TIME_MAX_DEVICE_TYPES = 21, // not a device type; count sentinel
};
|
| 64 |
+
|
| 65 |
+
constexpr DeviceType kCPU = DeviceType::CPU;
|
| 66 |
+
constexpr DeviceType kCUDA = DeviceType::CUDA;
|
| 67 |
+
constexpr DeviceType kHIP = DeviceType::HIP;
|
| 68 |
+
constexpr DeviceType kFPGA = DeviceType::FPGA;
|
| 69 |
+
constexpr DeviceType kMAIA = DeviceType::MAIA;
|
| 70 |
+
constexpr DeviceType kXLA = DeviceType::XLA;
|
| 71 |
+
constexpr DeviceType kMPS = DeviceType::MPS;
|
| 72 |
+
constexpr DeviceType kMeta = DeviceType::Meta;
|
| 73 |
+
constexpr DeviceType kVulkan = DeviceType::Vulkan;
|
| 74 |
+
constexpr DeviceType kMetal = DeviceType::Metal;
|
| 75 |
+
constexpr DeviceType kXPU = DeviceType::XPU;
|
| 76 |
+
constexpr DeviceType kHPU = DeviceType::HPU;
|
| 77 |
+
constexpr DeviceType kVE = DeviceType::VE;
|
| 78 |
+
constexpr DeviceType kLazy = DeviceType::Lazy;
|
| 79 |
+
constexpr DeviceType kIPU = DeviceType::IPU;
|
| 80 |
+
constexpr DeviceType kMTIA = DeviceType::MTIA;
|
| 81 |
+
constexpr DeviceType kPrivateUse1 = DeviceType::PrivateUse1;
|
| 82 |
+
|
| 83 |
+
// define explicit int constant
|
| 84 |
+
constexpr int COMPILE_TIME_MAX_DEVICE_TYPES =
|
| 85 |
+
static_cast<int>(DeviceType::COMPILE_TIME_MAX_DEVICE_TYPES);
|
| 86 |
+
|
| 87 |
+
static_assert(
|
| 88 |
+
COMPILE_TIME_MAX_DEVICE_TYPES <= 21,
|
| 89 |
+
"Hey! You seem to be adding a lot of new DeviceTypes. The intent was "
|
| 90 |
+
"for this constant to reflect the actual number of DeviceTypes we support "
|
| 91 |
+
"in PyTorch; it's important that this number is not too large as we "
|
| 92 |
+
"use this to allocate stack arrays in some places in our code. If you "
|
| 93 |
+
"are indeed just adding the 20th device type, feel free to change "
|
| 94 |
+
"the check to 32; but if you are adding some sort of extensible device "
|
| 95 |
+
"types registration, please be aware that you are affecting code that "
|
| 96 |
+
"this number is small. Try auditing uses of this constant.");
|
| 97 |
+
|
| 98 |
+
C10_API std::string DeviceTypeName(DeviceType d, bool lower_case = false);
|
| 99 |
+
|
| 100 |
+
C10_API bool isValidDeviceType(DeviceType d);
|
| 101 |
+
|
| 102 |
+
C10_API std::ostream& operator<<(std::ostream& stream, DeviceType type);
|
| 103 |
+
|
| 104 |
+
C10_API void register_privateuse1_backend(const std::string& backend_name);
|
| 105 |
+
C10_API std::string get_privateuse1_backend(bool lower_case = true);
|
| 106 |
+
|
| 107 |
+
C10_API bool is_privateuse1_backend_registered();
|
| 108 |
+
|
| 109 |
+
} // namespace c10
|
| 110 |
+
|
| 111 |
+
namespace std {
|
| 112 |
+
template <>
|
| 113 |
+
struct hash<c10::DeviceType> {
|
| 114 |
+
std::size_t operator()(c10::DeviceType k) const {
|
| 115 |
+
return std::hash<int>()(static_cast<int>(k));
|
| 116 |
+
}
|
| 117 |
+
};
|
| 118 |
+
} // namespace std
|
| 119 |
+
|
| 120 |
+
namespace torch {
|
| 121 |
+
// NOLINTNEXTLINE(misc-unused-using-decls)
|
| 122 |
+
using c10::DeviceType;
|
| 123 |
+
} // namespace torch
|
videochat2/lib/python3.10/site-packages/torch/include/c10/core/MemoryFormat.h
ADDED
|
@@ -0,0 +1,290 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/util/ArrayRef.h>
|
| 4 |
+
#include <c10/util/Exception.h>
|
| 5 |
+
|
| 6 |
+
#include <cstdint>
|
| 7 |
+
#include <ostream>
|
| 8 |
+
#include <vector>
|
| 9 |
+
|
| 10 |
+
// Memory format is not the property of a Tensor. It is the way to tell an
|
| 11 |
+
// operator how the result should be organized in memory and nothing more. That
|
| 12 |
+
// means memory format should never be used as return value for any tensor state
|
| 13 |
+
// interrogation functions (internally and externally).
|
| 14 |
+
//
|
| 15 |
+
// Possible options are:
|
| 16 |
+
// Preserve:
|
| 17 |
+
// If any of the input tensors is in channels_last format, operator output
|
| 18 |
+
// should be in channels_last format
|
| 19 |
+
//
|
| 20 |
+
// Contiguous:
|
| 21 |
+
// Regardless of input tensors format, the output should be contiguous
|
| 22 |
+
// Tensor.
|
| 23 |
+
//
|
| 24 |
+
// ChannelsLast:
|
| 25 |
+
// Regardless of input tensors format, the output should be in channels_last
|
| 26 |
+
// format.
|
| 27 |
+
|
| 28 |
+
namespace c10 {
|
| 29 |
+
// See the file-header comment above for the semantics of each option.
enum class MemoryFormat : int8_t {
  Contiguous, // output should be a contiguous (row-major) tensor
  Preserve, // output follows the input's format (e.g. channels_last in -> out)
  ChannelsLast, // output should be in channels_last (NHWC) format
  ChannelsLast3d, // 3d variant of ChannelsLast (presumably NDHWC)
  NumOptions // count sentinel, not a real memory format
};
|
| 36 |
+
|
| 37 |
+
// If you are seeing this, it means that this call site was not checked if
|
| 38 |
+
// the memory format could be preserved, and it was switched to old default
|
| 39 |
+
// behaviour of contiguous
|
| 40 |
+
#define LEGACY_CONTIGUOUS_MEMORY_FORMAT c10::get_contiguous_memory_format()
|
| 41 |
+
|
| 42 |
+
// Returns the old default memory format (Contiguous); used by
// LEGACY_CONTIGUOUS_MEMORY_FORMAT above to mark unaudited call sites.
inline MemoryFormat get_contiguous_memory_format() {
  return MemoryFormat::Contiguous;
}
|
| 45 |
+
|
| 46 |
+
inline std::ostream& operator<<(
|
| 47 |
+
std::ostream& stream,
|
| 48 |
+
at::MemoryFormat memory_format) {
|
| 49 |
+
switch (memory_format) {
|
| 50 |
+
case MemoryFormat::Preserve:
|
| 51 |
+
return stream << "Preserve";
|
| 52 |
+
case MemoryFormat::Contiguous:
|
| 53 |
+
return stream << "Contiguous";
|
| 54 |
+
case MemoryFormat::ChannelsLast:
|
| 55 |
+
return stream << "ChannelsLast";
|
| 56 |
+
case MemoryFormat::ChannelsLast3d:
|
| 57 |
+
return stream << "ChannelsLast3d";
|
| 58 |
+
default:
|
| 59 |
+
TORCH_CHECK(false, "Unknown memory format ", memory_format);
|
| 60 |
+
}
|
| 61 |
+
}
|
| 62 |
+
|
| 63 |
+
// Note: Hardcoded the channel last stride indices here to get better
|
| 64 |
+
// performance
|
| 65 |
+
// Computes channels-last strides for the given sizes. Rank 4 is interpreted
// as NCHW (producing NHWC strides); rank 3 as CHW. Any other rank asserts.
template <typename T>
inline std::vector<T> get_channels_last_strides_2d(ArrayRef<T> sizes) {
  std::vector<T> strides(sizes.size());
  switch (sizes.size()) {
    case 4:
      // Innermost dimension is C (index 1), then W, H, N.
      strides[1] = 1;
      strides[3] = sizes[1];
      strides[2] = strides[3] * sizes[3];
      strides[0] = strides[2] * sizes[2];
      return strides;
    case 3:
      // No batch dim: innermost is C (index 0), then W, H.
      strides[0] = 1;
      strides[2] = sizes[0];
      strides[1] = strides[2] * sizes[2];
      return strides;
    default:
      TORCH_INTERNAL_ASSERT(
          false, "ChannelsLast2d doesn't support size ", sizes.size());
  }
}
|
| 85 |
+
|
| 86 |
+
// Non-template convenience overload for concrete int64_t sizes.
inline std::vector<int64_t> get_channels_last_strides_2d(IntArrayRef sizes) {
  return get_channels_last_strides_2d<int64_t>(sizes);
}
|
| 89 |
+
|
| 90 |
+
// Computes 3d channels-last strides for the given sizes. Rank 5 is
// interpreted as NCDHW (producing NDHWC strides); rank 4 as CDHW. Any other
// rank asserts.
template <typename T>
std::vector<T> get_channels_last_strides_3d(ArrayRef<T> sizes) {
  std::vector<T> strides(sizes.size());
  switch (sizes.size()) {
    case 5:
      // Innermost dimension is C (index 1), then W, H, D, N.
      strides[1] = 1;
      strides[4] = sizes[1];
      strides[3] = strides[4] * sizes[4];
      strides[2] = strides[3] * sizes[3];
      strides[0] = strides[2] * sizes[2];
      return strides;
    case 4:
      // No batch dim: innermost is C (index 0), then W, H, D.
      strides[0] = 1;
      strides[3] = sizes[0];
      strides[2] = strides[3] * sizes[3];
      strides[1] = strides[2] * sizes[2];
      return strides;
    default:
      TORCH_INTERNAL_ASSERT(
          false, "ChannelsLast3d doesn't support size ", sizes.size());
  }
}
|
| 112 |
+
|
| 113 |
+
// Non-template convenience overload for concrete int64_t sizes.
inline std::vector<int64_t> get_channels_last_strides_3d(IntArrayRef sizes) {
  return get_channels_last_strides_3d<int64_t>(sizes);
}
|
| 116 |
+
|
| 117 |
+
// NOTE:
|
| 118 |
+
// Below are Helper functions for is_channels_last_strides_xd.
|
| 119 |
+
// 1. Please do not combine these helper functions, each helper function handles
|
| 120 |
+
// exactly one case of sizes + memory_format, by doing this, the strides indices
|
| 121 |
+
// will be a constant array and we can access it using constant index number,
|
| 122 |
+
// the compiler will fully unroll the loop on strides indices to gain a better
|
| 123 |
+
// performance.
|
| 124 |
+
// 2. No error check in helper function, caller ensures the correctness of the
|
| 125 |
+
// input
|
| 126 |
+
// 3. All helper functions have similar comments, only 1st helper function is
|
| 127 |
+
// commented here.
|
| 128 |
+
// Best-effort check that `strides` look like channels-last (NHWC) strides for
// a rank-4 tensor with `sizes`. See Note [Ambiguous is_channels_last_strides_xd]
// below for why this can only be a heuristic.
template <typename T>
inline bool is_channels_last_strides_2d_s4(
    const ArrayRef<T> sizes,
    const ArrayRef<T> strides) {
  // Smallest stride the next (outer) dimension is allowed to have.
  T min = 0;
  // special case for trivial C dimension. default to NCHW
  if (strides[1] == 0) {
    return false;
  }
  // loop strides indices
  for (auto& d : {1, 3, 2, 0}) {
    if (sizes[d] == 0) {
      return false;
    }
    if (strides[d] < min) {
      return false;
    }
    // Fallback to NCHW as default layout for ambiguous cases
    // This is the flaw of implicit memory_format from strides.
    // N111 tensor with identical strides for size 1 dimension;
    // Two cases could lead us here:
    // a. N111 contiguous Tensor ([N,1,1,1]@[1,1,1,1])
    // b. N11W contiguous Tensor sliced on the W-dimension.
    // ([N,1,1,1]@[W,W,W,W])
    if (d == 0 && min == strides[1]) {
      return false;
    }
    // This is necessary to:
    // 1. distinguish the memory_format of N1H1;
    //     [H, 1, 1, 1] channels_last stride
    //     [H, H, 1, 1] contiguous stride
    // 2. permutation of 1C1W:
    //     [1, C, 1, H]@[HC, H, H, 1] transpose(1, 3)
    //     [1, H, 1, C]@[HC, 1, H, H] shouldn't be identified as channels_last
    min = strides[d];
    if (sizes[d] > 1) {
      min *= sizes[d];
    }
  }
  return true;
}
|
| 169 |
+
|
| 170 |
+
// Rank-5 (3d channels-last, NDHWC) analogue of is_channels_last_strides_2d_s4.
// Per the NOTE above, only the first helper carries the detailed per-check
// commentary; the same reasoning applies to each check here.
template <typename T>
inline bool is_channels_last_strides_3d_s5(
    const ArrayRef<T> sizes,
    const ArrayRef<T> strides) {
  // Smallest stride the next (outer) dimension is allowed to have.
  T min = 0;
  // Trivial C dimension: fall back to the default (non-channels-last) layout.
  if (strides[1] == 0) {
    return false;
  }
  // Walk dims from innermost to outermost in NDHWC order: C, W, H, D, N.
  for (auto& d : {1, 4, 3, 2, 0}) {
    if (sizes[d] == 0) {
      return false;
    }
    if (strides[d] < min) {
      return false;
    }
    // Ambiguous N1111-style case; see the 2d helper for the full explanation.
    if (d == 0 && min == strides[1]) {
      return false;
    }
    min = strides[d];
    if (sizes[d] > 1) {
      min *= sizes[d];
    }
  }
  return true;
}
|
| 195 |
+
|
| 196 |
+
// Note [Ambiguous is_channels_last_strides_xd]
|
| 197 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 198 |
+
// The flaw of carrying memory_format implicitly through strides is very hard
|
| 199 |
+
// to WAR properly. issue #24090
|
| 200 |
+
// Without the history of permutation, we can't infer the memory_format of a
|
| 201 |
+
// tensor from the snapshot of its size & stride
|
| 202 |
+
// e.g.
|
| 203 |
+
//
|
| 204 |
+
// 1. We can NOT specify the memory_format of N111 tensor through strides in a
|
| 205 |
+
// meaningful way;
|
| 206 |
+
//
|
| 207 |
+
// 2. Two path that ended up with identical size/stride
|
| 208 |
+
// N11W contiguous tensor sliced at w-dimension becomes [N,1,1,1]@[W,W,W,W]
|
| 209 |
+
// NC11 channels_last tensor sliced at c-dimension becomes [N,1,1,1]@[C,C,C,C]
|
| 210 |
+
// So if we see a tensor [N,1,1,1]@[X,X,X,X], there's no way for us to infer
|
| 211 |
+
// the memory_format of the original tensor.
|
| 212 |
+
//
|
| 213 |
+
// Due to the limitations, our temporary WAR `is_channels_last_strides` does the
|
| 214 |
+
// best effort to infer whether the original memory_format of a tensor is
|
| 215 |
+
// at::MemoryFormat::ChannelsLast. The two objectives of this function (ordered
|
| 216 |
+
// by their importance):
|
| 217 |
+
// 1. Ensure that normal shape manipulation does not accidentally change the
|
| 218 |
+
// MemoryFormat of an existing tensor.
|
| 219 |
+
// 2. Allows user to mark MemoryFormat::ChannelsLast to tensors;
|
| 220 |
+
//
|
| 221 |
+
// The function does so via checking strides of the tensor, including strides of
|
| 222 |
+
// size-1 dimensions. Although conventionally PyTorch implies no restriction on
|
| 223 |
+
// trivial stride (stride for size-1 dimension).
|
| 224 |
+
//
|
| 225 |
+
// Note that this approach is a compromise. We did not solve the problem
|
| 226 |
+
// completely. Many cases we will not be able to infer the correct memory
|
| 227 |
+
// format.
|
| 228 |
+
// The implementation of `is_channels_last_strides` is to serve the objectives:
|
| 229 |
+
// MemoryFormat::ChannelsLast has to be explicitly opted-in (no accidental
|
| 230 |
+
// conversion); Best effort to maintain the ChannelsLast flag.
|
| 231 |
+
//
|
| 232 |
+
// Due to the fact that this is not a bulletproof solution, through testing
|
| 233 |
+
// (aten/src/ATen/test/memory_format_test.cpp)
|
| 234 |
+
// a. we ensure that the common tasks are supported;
|
| 235 |
+
// a. we identify corner cases where the implementation compromises on.
|
| 236 |
+
//
|
| 237 |
+
// By the time accumulated permutation is enabled to replace implicit
|
| 238 |
+
// memory_format through strides, we should be updating our tests and fix the
|
| 239 |
+
// issues in our tests.
|
| 240 |
+
//
|
| 241 |
+
// We use Channels Last 2d as an example above.
|
| 242 |
+
// This is a general problem for all the is_channels_last_strides_xd
|
| 243 |
+
// implementation. Please check the helper functions
|
| 244 |
+
// (is_channels_last_strides_*d_s*) for more details.
|
| 245 |
+
|
| 246 |
+
// Rank dispatcher for the 2d channels-last stride check; see
// Note [Ambiguous is_channels_last_strides_xd] above for caveats.
template <typename T>
inline bool is_channels_last_strides_2d(
    const ArrayRef<T> sizes,
    const ArrayRef<T> strides) {
  switch (sizes.size()) {
    case 4:
      return is_channels_last_strides_2d_s4(sizes, strides);
    // NOLINTNEXTLINE(bugprone-branch-clone)
    case 3:
      // TODO dim == 3 case will be enabled once it is fully tested
      return false;
    default:
      return false;
  }
}
|
| 261 |
+
|
| 262 |
+
// Rank dispatcher for the 3d channels-last stride check; see
// Note [Ambiguous is_channels_last_strides_xd] above for caveats.
template <typename T>
inline bool is_channels_last_strides_3d(
    const ArrayRef<T> sizes,
    const ArrayRef<T> strides) {
  switch (sizes.size()) {
    case 5:
      return is_channels_last_strides_3d_s5(sizes, strides);
    // NOLINTNEXTLINE(bugprone-branch-clone)
    case 4:
      // TODO dim == 4 case will be enabled once it is fully tested
      return false;
    default:
      return false;
  }
}
|
| 277 |
+
|
| 278 |
+
// Non-template convenience overload for concrete int64_t sizes/strides.
inline bool is_channels_last_strides_2d(
    const IntArrayRef sizes,
    const IntArrayRef strides) {
  return is_channels_last_strides_2d<int64_t>(sizes, strides);
}
|
| 283 |
+
|
| 284 |
+
// Non-template convenience overload for concrete int64_t sizes/strides.
inline bool is_channels_last_strides_3d(
    const IntArrayRef sizes,
    const IntArrayRef strides) {
  return is_channels_last_strides_3d<int64_t>(sizes, strides);
}
|
| 289 |
+
|
| 290 |
+
} // namespace c10
|
videochat2/lib/python3.10/site-packages/torch/include/c10/core/QScheme.h
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/util/Exception.h>
|
| 4 |
+
#include <cstdint>
|
| 5 |
+
#include <string>
|
| 6 |
+
|
| 7 |
+
namespace c10 {
|
| 8 |
+
|
| 9 |
+
/**
|
| 10 |
+
* QScheme is an enum that specifies the type of quantization. This has a one
|
| 11 |
+
* to one correspondence with Quantizer
|
| 12 |
+
* Please refer to ATen/quantized/Quantizer.h to see the Quantizers classes.
|
| 13 |
+
* Keep this file in sync with torch/nn/_qscheme.py
|
| 14 |
+
*/
|
| 15 |
+
// See the doc comment above: values correspond one-to-one with Quantizer
// classes and must stay in sync with torch/nn/_qscheme.py.
enum class QScheme : uint8_t {
  PER_TENSOR_AFFINE = 0,
  PER_CHANNEL_AFFINE = 1,
  PER_TENSOR_SYMMETRIC = 2,
  PER_CHANNEL_SYMMETRIC = 3,
  PER_CHANNEL_AFFINE_FLOAT_QPARAMS = 4,
  COMPILE_TIME_NUM_QSCHEMES = 5, // count sentinel, not a real qscheme
};
|
| 23 |
+
|
| 24 |
+
constexpr auto kPerTensorAffine = QScheme::PER_TENSOR_AFFINE;
|
| 25 |
+
constexpr auto kPerChannelAffine = QScheme::PER_CHANNEL_AFFINE;
|
| 26 |
+
constexpr auto kPerTensorSymmetric = QScheme::PER_TENSOR_SYMMETRIC;
|
| 27 |
+
constexpr auto kPerChannelSymmetric = QScheme::PER_CHANNEL_SYMMETRIC;
|
| 28 |
+
constexpr auto kPerChannelAffineFloatQParams =
|
| 29 |
+
QScheme::PER_CHANNEL_AFFINE_FLOAT_QPARAMS;
|
| 30 |
+
constexpr int COMPILE_TIME_NUM_QSCHEMES =
|
| 31 |
+
static_cast<int>(QScheme::COMPILE_TIME_NUM_QSCHEMES);
|
| 32 |
+
|
| 33 |
+
// Returns the snake_case name of a QScheme (matching torch/nn/_qscheme.py per
// the header comment); errors on any value outside the named schemes.
inline std::string toString(QScheme qscheme) {
  const char* name = nullptr;
  switch (qscheme) {
    case kPerTensorAffine:
      name = "per_tensor_affine";
      break;
    case kPerChannelAffine:
      name = "per_channel_affine";
      break;
    case kPerTensorSymmetric:
      name = "per_tensor_symmetric";
      break;
    case kPerChannelSymmetric:
      name = "per_channel_symmetric";
      break;
    case kPerChannelAffineFloatQParams:
      name = "per_channel_affine_float_qparams";
      break;
    default:
      TORCH_CHECK(false, "Unrecognized qscheme: ", static_cast<int>(qscheme));
  }
  return name;
}
|
| 49 |
+
|
| 50 |
+
} // namespace c10
|
videochat2/lib/python3.10/site-packages/torch/include/c10/core/SymBool.h
ADDED
|
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/core/SymNodeImpl.h>
|
| 4 |
+
#include <c10/macros/Export.h>
|
| 5 |
+
#include <c10/util/Exception.h>
|
| 6 |
+
#include <c10/util/intrusive_ptr.h>
|
| 7 |
+
#include <cstdint>
|
| 8 |
+
#include <optional>
|
| 9 |
+
#include <ostream>
|
| 10 |
+
#include <utility>
|
| 11 |
+
|
| 12 |
+
namespace c10 {
|
| 13 |
+
|
| 14 |
+
class C10_API SymBool {
|
| 15 |
+
public:
|
| 16 |
+
/*implicit*/ SymBool(bool b) : data_(b){};
|
| 17 |
+
SymBool(SymNode ptr) : data_(false), ptr_(std::move(ptr)) {
|
| 18 |
+
TORCH_CHECK(ptr_->is_bool());
|
| 19 |
+
};
|
| 20 |
+
SymBool() : data_(false) {}
|
| 21 |
+
|
| 22 |
+
SymNodeImpl* toSymNodeImplUnowned() const {
|
| 23 |
+
return ptr_.get();
|
| 24 |
+
}
|
| 25 |
+
|
| 26 |
+
SymNodeImpl* release() && {
|
| 27 |
+
return std::move(ptr_).release();
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
// Only valid if is_heap_allocated()
|
| 31 |
+
SymNode toSymNodeImpl() const;
|
| 32 |
+
|
| 33 |
+
// Guaranteed to return a SymNode, wrapping using base if necessary
|
| 34 |
+
SymNode wrap_node(const SymNode& base) const;
|
| 35 |
+
|
| 36 |
+
bool expect_bool() const {
|
| 37 |
+
std::optional<bool> c = maybe_as_bool();
|
| 38 |
+
TORCH_CHECK(c.has_value());
|
| 39 |
+
return *c;
|
| 40 |
+
}
|
| 41 |
+
|
| 42 |
+
SymBool sym_and(const SymBool&) const;
|
| 43 |
+
SymBool sym_or(const SymBool&) const;
|
| 44 |
+
SymBool sym_not() const;
|
| 45 |
+
|
| 46 |
+
SymBool operator&(const SymBool& other) const {
|
| 47 |
+
return sym_and(other);
|
| 48 |
+
}
|
| 49 |
+
SymBool operator|(const SymBool& other) const {
|
| 50 |
+
return sym_or(other);
|
| 51 |
+
}
|
| 52 |
+
SymBool operator~() const {
|
| 53 |
+
return sym_not();
|
| 54 |
+
}
|
| 55 |
+
|
| 56 |
+
// Insert a guard for the bool to be its concrete value, and then return
|
| 57 |
+
// that value. Note that C++ comparison operations default to returning
|
| 58 |
+
// bool, so it's not so common to have to call this
|
| 59 |
+
bool guard_bool(const char* file, int64_t line) const;
|
| 60 |
+
bool expect_true(const char* file, int64_t line) const;
|
| 61 |
+
bool guard_size_oblivious(const char* file, int64_t line) const;
|
| 62 |
+
|
| 63 |
+
bool has_hint() const;
|
| 64 |
+
|
| 65 |
+
bool as_bool_unchecked() const {
|
| 66 |
+
return data_;
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
std::optional<bool> maybe_as_bool() const {
|
| 70 |
+
if (!is_heap_allocated()) {
|
| 71 |
+
return std::make_optional(data_);
|
| 72 |
+
}
|
| 73 |
+
return toSymNodeImplUnowned()->constant_bool();
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
bool is_heap_allocated() const {
|
| 77 |
+
return ptr_;
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
private:
|
| 81 |
+
// TODO: optimize to union
|
| 82 |
+
bool data_;
|
| 83 |
+
SymNode ptr_;
|
| 84 |
+
};
|
| 85 |
+
|
| 86 |
+
C10_API std::ostream& operator<<(std::ostream& os, const SymBool& s);
|
| 87 |
+
|
| 88 |
+
#define TORCH_SYM_CHECK(cond, ...) \
|
| 89 |
+
TORCH_CHECK((cond).expect_true(__FILE__, __LINE__), __VA_ARGS__)
|
| 90 |
+
#define TORCH_SYM_INTERNAL_ASSERT(cond, ...) \
|
| 91 |
+
TORCH_INTERNAL_ASSERT((cond).expect_true(__FILE__, __LINE__), __VA_ARGS__)
|
| 92 |
+
|
| 93 |
+
inline bool guard_size_oblivious(
|
| 94 |
+
bool b,
|
| 95 |
+
const char* file [[maybe_unused]],
|
| 96 |
+
int64_t line [[maybe_unused]]) {
|
| 97 |
+
return b;
|
| 98 |
+
}
|
| 99 |
+
|
| 100 |
+
inline bool guard_size_oblivious(
|
| 101 |
+
const c10::SymBool& b,
|
| 102 |
+
const char* file,
|
| 103 |
+
int64_t line) {
|
| 104 |
+
return b.guard_size_oblivious(file, line);
|
| 105 |
+
}
|
| 106 |
+
|
| 107 |
+
#define TORCH_GUARD_SIZE_OBLIVIOUS(cond) \
|
| 108 |
+
c10::guard_size_oblivious((cond), __FILE__, __LINE__)
|
| 109 |
+
|
| 110 |
+
} // namespace c10
|
videochat2/lib/python3.10/site-packages/torch/include/c10/core/SymFloat.h
ADDED
|
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/core/SymBool.h>
|
| 4 |
+
#include <c10/core/SymNodeImpl.h>
|
| 5 |
+
#include <c10/macros/Export.h>
|
| 6 |
+
#include <c10/macros/Macros.h>
|
| 7 |
+
#include <c10/util/Exception.h>
|
| 8 |
+
#include <c10/util/intrusive_ptr.h>
|
| 9 |
+
|
| 10 |
+
#include <cstdint>
|
| 11 |
+
#include <limits>
|
| 12 |
+
#include <ostream>
|
| 13 |
+
#include <utility>
|
| 14 |
+
|
| 15 |
+
namespace c10 {
|
| 16 |
+
|
| 17 |
+
// NB: this is actually double precision; we're using the Python naming here
|
| 18 |
+
class C10_API SymFloat {
|
| 19 |
+
public:
|
| 20 |
+
/*implicit*/ SymFloat(double d) : data_(d){};
|
| 21 |
+
SymFloat(SymNode ptr)
|
| 22 |
+
: data_(std::numeric_limits<double>::quiet_NaN()), ptr_(std::move(ptr)) {
|
| 23 |
+
TORCH_CHECK(ptr_->is_float());
|
| 24 |
+
};
|
| 25 |
+
SymFloat() : data_(0.0) {}
|
| 26 |
+
|
| 27 |
+
SymNodeImpl* toSymNodeImplUnowned() const {
|
| 28 |
+
return ptr_.get();
|
| 29 |
+
}
|
| 30 |
+
|
| 31 |
+
SymNodeImpl* release() && {
|
| 32 |
+
return std::move(ptr_).release();
|
| 33 |
+
}
|
| 34 |
+
|
| 35 |
+
// Only valid if is_symbolic()
|
| 36 |
+
SymNode toSymNodeImpl() const;
|
| 37 |
+
|
| 38 |
+
// Guaranteed to return a SymNode, wrapping using base if necessary
|
| 39 |
+
SymNode wrap_node(const SymNode& base) const;
|
| 40 |
+
|
| 41 |
+
double expect_float() const {
|
| 42 |
+
TORCH_CHECK(!is_symbolic());
|
| 43 |
+
return data_;
|
| 44 |
+
}
|
| 45 |
+
|
| 46 |
+
SymFloat operator+(const SymFloat&) const;
|
| 47 |
+
SymFloat operator-(const SymFloat&) const;
|
| 48 |
+
SymFloat operator*(const SymFloat&) const;
|
| 49 |
+
SymFloat operator/(const SymFloat&) const;
|
| 50 |
+
|
| 51 |
+
SymBool sym_eq(const SymFloat&) const;
|
| 52 |
+
SymBool sym_ne(const SymFloat&) const;
|
| 53 |
+
SymBool sym_lt(const SymFloat&) const;
|
| 54 |
+
SymBool sym_le(const SymFloat&) const;
|
| 55 |
+
SymBool sym_gt(const SymFloat&) const;
|
| 56 |
+
SymBool sym_ge(const SymFloat&) const;
|
| 57 |
+
|
| 58 |
+
bool operator==(const SymFloat& o) const {
|
| 59 |
+
return sym_eq(o).guard_bool(__FILE__, __LINE__);
|
| 60 |
+
}
|
| 61 |
+
bool operator!=(const SymFloat& o) const {
|
| 62 |
+
return sym_ne(o).guard_bool(__FILE__, __LINE__);
|
| 63 |
+
}
|
| 64 |
+
bool operator<(const SymFloat& o) const {
|
| 65 |
+
return sym_lt(o).guard_bool(__FILE__, __LINE__);
|
| 66 |
+
}
|
| 67 |
+
bool operator<=(const SymFloat& o) const {
|
| 68 |
+
return sym_le(o).guard_bool(__FILE__, __LINE__);
|
| 69 |
+
}
|
| 70 |
+
bool operator>(const SymFloat& o) const {
|
| 71 |
+
return sym_gt(o).guard_bool(__FILE__, __LINE__);
|
| 72 |
+
}
|
| 73 |
+
bool operator>=(const SymFloat& o) const {
|
| 74 |
+
return sym_ge(o).guard_bool(__FILE__, __LINE__);
|
| 75 |
+
}
|
| 76 |
+
|
| 77 |
+
SymFloat min(const SymFloat& sci) const;
|
| 78 |
+
SymFloat max(const SymFloat& sci) const;
|
| 79 |
+
|
| 80 |
+
// Need guidance on where to put this code
|
| 81 |
+
SymFloat sqrt() const;
|
| 82 |
+
|
| 83 |
+
// Insert a guard for the float to be its concrete value, and then return
|
| 84 |
+
// that value. This operation always works, even if the float is symbolic,
|
| 85 |
+
// so long as we know what the underlying value is. Don't blindly put this
|
| 86 |
+
// everywhere; you can cause overspecialization of PyTorch programs with
|
| 87 |
+
// this method.
|
| 88 |
+
//
|
| 89 |
+
// It should be called as guard_float(__FILE__, __LINE__). The file and line
|
| 90 |
+
// number can be used to diagnose overspecialization.
|
| 91 |
+
double guard_float(const char* file, int64_t line) const;
|
| 92 |
+
|
| 93 |
+
bool has_hint() const;
|
| 94 |
+
|
| 95 |
+
// N.B. It's important to keep this definition in the header
|
| 96 |
+
// as we expect if checks to be folded for mobile builds
|
| 97 |
+
// where `is_symbolic` is always false
|
| 98 |
+
C10_ALWAYS_INLINE bool is_symbolic() const {
|
| 99 |
+
return ptr_;
|
| 100 |
+
}
|
| 101 |
+
|
| 102 |
+
double as_float_unchecked() const {
|
| 103 |
+
return data_;
|
| 104 |
+
}
|
| 105 |
+
|
| 106 |
+
private:
|
| 107 |
+
// TODO: optimize to union
|
| 108 |
+
double data_;
|
| 109 |
+
SymNode ptr_;
|
| 110 |
+
};
|
| 111 |
+
|
| 112 |
+
C10_API std::ostream& operator<<(std::ostream& os, const SymFloat& s);
|
| 113 |
+
} // namespace c10
|
videochat2/lib/python3.10/site-packages/torch/include/c10/core/SymInt.h
ADDED
|
@@ -0,0 +1,424 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/core/SymBool.h>
|
| 4 |
+
#include <c10/core/SymNodeImpl.h>
|
| 5 |
+
#include <c10/macros/Export.h>
|
| 6 |
+
#include <c10/macros/Macros.h>
|
| 7 |
+
#include <c10/util/Exception.h>
|
| 8 |
+
#include <c10/util/Optional.h>
|
| 9 |
+
|
| 10 |
+
#include <cstdint>
|
| 11 |
+
#include <iterator>
|
| 12 |
+
#include <numeric>
|
| 13 |
+
#include <optional>
|
| 14 |
+
#include <ostream>
|
| 15 |
+
#include <type_traits>
|
| 16 |
+
|
| 17 |
+
namespace c10 {
|
| 18 |
+
|
| 19 |
+
class SymFloat;
|
| 20 |
+
|
| 21 |
+
// SymInt represents either a regular int64_t, or a symbolic integer
|
| 22 |
+
// (represented in a type erased way as SymNode). The intention is for SymInt
|
| 23 |
+
// to represent symbolic sizes that arise when doing shape computation in
|
| 24 |
+
// operator kernels. This allows for tracing through programs without baking in
|
| 25 |
+
// concrete sizes into kernel calls.
|
| 26 |
+
//
|
| 27 |
+
// SymInt has an API equivalent to int64_t. In particular, it is a value type.
|
| 28 |
+
// Internally, SymInt is represented in a clever packed way, so that it only
|
| 29 |
+
// occupies one word of space; but morally, it is a union between an int64_t
|
| 30 |
+
// and an intrusive pointer to SymNodeImpl.
|
| 31 |
+
//
|
| 32 |
+
// Invariant: the referenced SymNodeImpl is guaranteed to be a SymNode where
|
| 33 |
+
// is_int() returns true
|
| 34 |
+
|
| 35 |
+
class C10_API SymInt {
|
| 36 |
+
public:
|
| 37 |
+
enum Unchecked {
|
| 38 |
+
UNCHECKED,
|
| 39 |
+
};
|
| 40 |
+
|
| 41 |
+
/*implicit*/ SymInt(int64_t d) : data_(d) {
|
| 42 |
+
if (is_heap_allocated()) {
|
| 43 |
+
// Large negative number, heap allocate it
|
| 44 |
+
promote_to_negative();
|
| 45 |
+
}
|
| 46 |
+
};
|
| 47 |
+
SymInt() : data_(0) {}
|
| 48 |
+
SymInt(SymNode n);
|
| 49 |
+
|
| 50 |
+
// unchecked c-tor accepting raw `data_`
|
| 51 |
+
// One appropriate use for this is when you are constructing a symint
|
| 52 |
+
// in a situation where you know it is non-negative (or, if it is negative,
|
| 53 |
+
// the negative value is -1; i.e., not user controlled)
|
| 54 |
+
SymInt(Unchecked, int64_t d) : data_(d) {}
|
| 55 |
+
|
| 56 |
+
// TODO: these implementations are not optimal because they allocate a
|
| 57 |
+
// temporary and then use the move constructor/assignment
|
| 58 |
+
SymInt(const SymInt& s) : data_(0) {
|
| 59 |
+
if (s.is_heap_allocated()) {
|
| 60 |
+
*this = SymInt(s.toSymNode());
|
| 61 |
+
} else {
|
| 62 |
+
data_ = s.data_;
|
| 63 |
+
}
|
| 64 |
+
}
|
| 65 |
+
SymInt(SymInt&& s) noexcept : data_(s.data_) {
|
| 66 |
+
s.data_ = 0;
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
SymInt& operator=(const SymInt& s) {
|
| 70 |
+
if (this != &s) {
|
| 71 |
+
if (s.is_heap_allocated()) {
|
| 72 |
+
*this = SymInt(s.toSymNode());
|
| 73 |
+
} else {
|
| 74 |
+
data_ = s.data_;
|
| 75 |
+
}
|
| 76 |
+
}
|
| 77 |
+
return *this;
|
| 78 |
+
}
|
| 79 |
+
SymInt& operator=(SymInt&& s) noexcept {
|
| 80 |
+
if (this != &s) {
|
| 81 |
+
release_(); // release the current SymNode if any
|
| 82 |
+
data_ = s.data_;
|
| 83 |
+
if (s.is_heap_allocated())
|
| 84 |
+
s.data_ = 0;
|
| 85 |
+
};
|
| 86 |
+
return *this;
|
| 87 |
+
}
|
| 88 |
+
|
| 89 |
+
SymNodeImpl* toSymNodeImplUnowned() const {
|
| 90 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(is_heap_allocated());
|
| 91 |
+
uint64_t unextended_bits = static_cast<uint64_t>(data_) & ~MASK;
|
| 92 |
+
uint64_t sign_bit_mask = 1ULL << (62 - 1);
|
| 93 |
+
// https://stackoverflow.com/questions/42534749/signed-extension-from-24-bit-to-32-bit-in-c
|
| 94 |
+
uint64_t extended_bits = (unextended_bits ^ sign_bit_mask) - sign_bit_mask;
|
| 95 |
+
return static_cast<SymNodeImpl*>(
|
| 96 |
+
// NOLINTNEXTLINE(performance-no-int-to-ptr)
|
| 97 |
+
reinterpret_cast<void*>(static_cast<uintptr_t>(extended_bits)));
|
| 98 |
+
}
|
| 99 |
+
|
| 100 |
+
void release_() {
|
| 101 |
+
if (is_heap_allocated()) {
|
| 102 |
+
SymNode::reclaim(toSymNodeImplUnowned()); // steal
|
| 103 |
+
}
|
| 104 |
+
}
|
| 105 |
+
|
| 106 |
+
SymNodeImpl* release() && {
|
| 107 |
+
#ifndef C10_MOBILE
|
| 108 |
+
TORCH_INTERNAL_ASSERT(is_heap_allocated());
|
| 109 |
+
auto* r = toSymNodeImplUnowned();
|
| 110 |
+
data_ = 0; // transfer ownership
|
| 111 |
+
return r;
|
| 112 |
+
#else
|
| 113 |
+
TORCH_INTERNAL_ASSERT(false);
|
| 114 |
+
#endif
|
| 115 |
+
}
|
| 116 |
+
|
| 117 |
+
// Only valid if is_heap_allocated()
|
| 118 |
+
SymNode toSymNode() const;
|
| 119 |
+
|
| 120 |
+
// Guaranteed to return a SymNode, wrapping using base if necessary
|
| 121 |
+
SymNode wrap_node(const SymNode& base) const;
|
| 122 |
+
|
| 123 |
+
~SymInt() {
|
| 124 |
+
release_();
|
| 125 |
+
}
|
| 126 |
+
|
| 127 |
+
// Require the int to be non-symbolic, and if it is symbolic raise an
|
| 128 |
+
// error. This is safe to use for C++ code that doesn't work for symbolic
|
| 129 |
+
// shapes, and you don't have time to fix it immediately, as if we
|
| 130 |
+
// try to trigger the path in C++ you'll appropriately get an error
|
| 131 |
+
int64_t expect_int() const {
|
| 132 |
+
if (auto r = maybe_as_int()) {
|
| 133 |
+
return *r;
|
| 134 |
+
}
|
| 135 |
+
TORCH_CHECK_ALWAYS_SHOW_CPP_STACKTRACE(
|
| 136 |
+
false, "when unpacking SymInt, expected int but got ", *this);
|
| 137 |
+
}
|
| 138 |
+
|
| 139 |
+
// Test if we have a hint for this int (e.g., guard_int would work).
|
| 140 |
+
// Most of the time this is true; it is only false when you have
|
| 141 |
+
// an unbacked SymInt.
|
| 142 |
+
bool has_hint() const;
|
| 143 |
+
|
| 144 |
+
// Insert a guard for the int to be its concrete value, and then return
|
| 145 |
+
// that value. This operation always works, even if the int is symbolic,
|
| 146 |
+
// so long as we know what the underlying value is (e.g., this won't work
|
| 147 |
+
// if you call it on the size of nonzero output). Don't blindly put this
|
| 148 |
+
// everywhere; you can cause overspecialization of PyTorch programs with
|
| 149 |
+
// this method.
|
| 150 |
+
//
|
| 151 |
+
// It should be called as guard_int(__FILE__, __LINE__). The file and line
|
| 152 |
+
// number can be used to diagnose overspecialization.
|
| 153 |
+
int64_t guard_int(const char* file, int64_t line) const;
|
| 154 |
+
|
| 155 |
+
// Insert a guard that this SymInt must be size-like, returning true if
|
| 156 |
+
// the integer actually is >= 0. Unlike manually performing a >= 0 test,
|
| 157 |
+
// if the SymInt in question is an unbacked SymInt (or, potentially in the
|
| 158 |
+
// future, if it contains unbacked SymInts), we will also treat the
|
| 159 |
+
// unbacked SymInt as statically testing >= 2 (which will prevent us from
|
| 160 |
+
// choking on, e.g., contiguity checks.)
|
| 161 |
+
bool expect_size(const char* file, int64_t line) const;
|
| 162 |
+
|
| 163 |
+
// Distinguish actual symbolic values from constants stored on the heap
|
| 164 |
+
bool is_symbolic() const {
|
| 165 |
+
return is_heap_allocated() &&
|
| 166 |
+
!toSymNodeImplUnowned()->constant_int().has_value();
|
| 167 |
+
}
|
| 168 |
+
|
| 169 |
+
// N.B. It's important to keep this definition in the header
|
| 170 |
+
// as we expect if checks to be folded for mobile builds
|
| 171 |
+
// where `is_heap_allocated` is always false and optimize dead code paths
|
| 172 |
+
C10_ALWAYS_INLINE bool is_heap_allocated() const {
|
| 173 |
+
#ifdef C10_MOBILE
|
| 174 |
+
return false;
|
| 175 |
+
#else
|
| 176 |
+
return !check_range(data_);
|
| 177 |
+
#endif
|
| 178 |
+
}
|
| 179 |
+
|
| 180 |
+
SymInt operator+(const SymInt& sci) const;
|
| 181 |
+
SymInt operator-(const SymInt& sci) const;
|
| 182 |
+
SymInt operator*(const SymInt& sci) const;
|
| 183 |
+
SymInt operator/(const SymInt& sci) const;
|
| 184 |
+
SymInt operator%(const SymInt& sci) const;
|
| 185 |
+
void operator*=(const SymInt& sci);
|
| 186 |
+
void operator+=(const SymInt& sci);
|
| 187 |
+
void operator/=(const SymInt& sci);
|
| 188 |
+
|
| 189 |
+
SymInt clone() const;
|
| 190 |
+
|
| 191 |
+
SymBool sym_eq(const SymInt&) const;
|
| 192 |
+
SymBool sym_ne(const SymInt&) const;
|
| 193 |
+
SymBool sym_lt(const SymInt&) const;
|
| 194 |
+
SymBool sym_le(const SymInt&) const;
|
| 195 |
+
SymBool sym_gt(const SymInt&) const;
|
| 196 |
+
SymBool sym_ge(const SymInt&) const;
|
| 197 |
+
|
| 198 |
+
bool operator==(const SymInt& o) const {
|
| 199 |
+
return sym_eq(o).guard_bool(__FILE__, __LINE__);
|
| 200 |
+
}
|
| 201 |
+
bool operator!=(const SymInt& o) const {
|
| 202 |
+
return sym_ne(o).guard_bool(__FILE__, __LINE__);
|
| 203 |
+
}
|
| 204 |
+
bool operator<(const SymInt& o) const {
|
| 205 |
+
return sym_lt(o).guard_bool(__FILE__, __LINE__);
|
| 206 |
+
}
|
| 207 |
+
bool operator<=(const SymInt& o) const {
|
| 208 |
+
return sym_le(o).guard_bool(__FILE__, __LINE__);
|
| 209 |
+
}
|
| 210 |
+
bool operator>(const SymInt& o) const {
|
| 211 |
+
return sym_gt(o).guard_bool(__FILE__, __LINE__);
|
| 212 |
+
}
|
| 213 |
+
bool operator>=(const SymInt& o) const {
|
| 214 |
+
return sym_ge(o).guard_bool(__FILE__, __LINE__);
|
| 215 |
+
}
|
| 216 |
+
|
| 217 |
+
SymInt min(const SymInt& sci) const;
|
| 218 |
+
SymInt max(const SymInt& sci) const;
|
| 219 |
+
|
| 220 |
+
// If both are symbolic, this checks if
|
| 221 |
+
// they share the same node.
|
| 222 |
+
// If both are not symbolic this just checks normal equality.
|
| 223 |
+
bool is_same(const SymInt& other) const;
|
| 224 |
+
|
| 225 |
+
operator SymFloat() const;
|
| 226 |
+
|
| 227 |
+
// Don't use this. Prefer maybe_as_int instead
|
| 228 |
+
int64_t as_int_unchecked() const {
|
| 229 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!is_heap_allocated());
|
| 230 |
+
return data_;
|
| 231 |
+
}
|
| 232 |
+
|
| 233 |
+
std::optional<int64_t> maybe_as_int() const {
|
| 234 |
+
if (!is_heap_allocated()) {
|
| 235 |
+
return std::make_optional(data_);
|
| 236 |
+
}
|
| 237 |
+
auto* node = toSymNodeImplUnowned();
|
| 238 |
+
if (auto c = node->constant_int()) {
|
| 239 |
+
return c;
|
| 240 |
+
}
|
| 241 |
+
return node->maybe_as_int();
|
| 242 |
+
}
|
| 243 |
+
|
| 244 |
+
// Return whether the integer is directly coercible to a SymInt
|
| 245 |
+
// without requiring heap allocation. You don't need to use this
|
| 246 |
+
// to check if you can pass an integer to SymInt; this is guaranteed
|
| 247 |
+
// to work (it just might heap allocate!)
|
| 248 |
+
static bool check_range(int64_t i) {
|
| 249 |
+
return i > MAX_UNREPRESENTABLE_INT;
|
| 250 |
+
}
|
| 251 |
+
|
| 252 |
+
// Return the min representable integer as a SymInt without
|
| 253 |
+
// heap allocation. For quantities that count bytes (or larger),
|
| 254 |
+
// this is still much larger than you need, so you may consider
|
| 255 |
+
// using this as a more efficient version of MIN_INT
|
| 256 |
+
static constexpr int64_t min_representable_int() {
|
| 257 |
+
return MAX_UNREPRESENTABLE_INT + 1;
|
| 258 |
+
}
|
| 259 |
+
|
| 260 |
+
private:
|
| 261 |
+
void promote_to_negative();
|
| 262 |
+
|
| 263 |
+
// Constraints on the internal representation:
|
| 264 |
+
//
|
| 265 |
+
// - Should represent positive and small negative ints
|
| 266 |
+
// - No conversion necessary for operations on ints
|
| 267 |
+
// - Must represent valid 64-bit pointers
|
| 268 |
+
// - Is symbolic test should be FAST (two arithmetic instructions is too
|
| 269 |
+
// much).
|
| 270 |
+
// This code being a hotpath is based on Strobelight profiles of
|
| 271 |
+
// is_heap_allocated(). FB only: https://fburl.com/strobelight/5l50ncxd
|
| 272 |
+
// (you will need to change the time window).
|
| 273 |
+
//
|
| 274 |
+
// So, the scheme is to reserve large negative numbers (assuming
|
| 275 |
+
// two's complement):
|
| 276 |
+
//
|
| 277 |
+
// - 0b0.... means we are a positive int
|
| 278 |
+
// - 0b11... means we are a small negative int
|
| 279 |
+
// - 0b10... means we are are a pointer. This means that
|
| 280 |
+
// [-2^63, -2^62-1] are not representable as ints.
|
| 281 |
+
// We don't actually need all of this space as on x86_64
|
| 282 |
+
// as the top 16bits aren't used for anything
|
| 283 |
+
static constexpr uint64_t MASK = 1ULL << 63 | 1ULL << 62 | 1ULL << 61;
|
| 284 |
+
static constexpr uint64_t IS_SYM = 1ULL << 63 | 1ULL << 61;
|
| 285 |
+
// We must manually translate the bit pattern test into a greater
|
| 286 |
+
// than test because compiler doesn't figure it out:
|
| 287 |
+
// https://godbolt.org/z/356aferaW
|
| 288 |
+
static constexpr int64_t MAX_UNREPRESENTABLE_INT =
|
| 289 |
+
-1LL & static_cast<int64_t>(~(1ULL << 62));
|
| 290 |
+
int64_t data_;
|
| 291 |
+
};
|
| 292 |
+
|
| 293 |
+
/// Sum of a list of SymInt; accumulates into the c10::SymInt expression
|
| 294 |
+
template <
|
| 295 |
+
typename C,
|
| 296 |
+
typename std::enable_if_t<
|
| 297 |
+
std::is_same_v<typename C::value_type, c10::SymInt>,
|
| 298 |
+
int> = 0>
|
| 299 |
+
inline c10::SymInt multiply_integers(const C& container) {
|
| 300 |
+
return std::accumulate(
|
| 301 |
+
container.begin(),
|
| 302 |
+
container.end(),
|
| 303 |
+
c10::SymInt(1),
|
| 304 |
+
[](const c10::SymInt& a, const c10::SymInt& b) { return a * b; });
|
| 305 |
+
}
|
| 306 |
+
|
| 307 |
+
template <
|
| 308 |
+
typename Iter,
|
| 309 |
+
typename = std::enable_if_t<std::is_same_v<
|
| 310 |
+
typename std::iterator_traits<Iter>::value_type,
|
| 311 |
+
c10::SymInt>>>
|
| 312 |
+
inline c10::SymInt multiply_integers(Iter begin, Iter end) {
|
| 313 |
+
return std::accumulate(
|
| 314 |
+
begin,
|
| 315 |
+
end,
|
| 316 |
+
c10::SymInt(1),
|
| 317 |
+
[](const c10::SymInt& a, const c10::SymInt& b) { return a * b; });
|
| 318 |
+
}
|
| 319 |
+
|
| 320 |
+
#define DECLARE_SYMINT_OP_INTONLY(scalar_t, RetTy) \
|
| 321 |
+
C10_API RetTy operator%(const SymInt& a, scalar_t b); \
|
| 322 |
+
C10_API RetTy operator%(scalar_t a, const SymInt& b);
|
| 323 |
+
|
| 324 |
+
#define DECLARE_SYMINT_OP(scalar_t, RetTy) \
|
| 325 |
+
C10_API RetTy operator+(const SymInt& a, scalar_t b); \
|
| 326 |
+
C10_API RetTy operator-(const SymInt& a, scalar_t b); \
|
| 327 |
+
C10_API RetTy operator*(const SymInt& a, scalar_t b); \
|
| 328 |
+
C10_API RetTy operator/(const SymInt& a, scalar_t b); \
|
| 329 |
+
C10_API RetTy operator+(scalar_t a, const SymInt& b); \
|
| 330 |
+
C10_API RetTy operator-(scalar_t a, const SymInt& b); \
|
| 331 |
+
C10_API RetTy operator*(scalar_t a, const SymInt& b); \
|
| 332 |
+
C10_API RetTy operator/(scalar_t a, const SymInt& b); \
|
| 333 |
+
C10_API bool operator==(const SymInt& a, scalar_t b); \
|
| 334 |
+
C10_API bool operator!=(const SymInt& a, scalar_t b); \
|
| 335 |
+
C10_API bool operator<(const SymInt& a, scalar_t b); \
|
| 336 |
+
C10_API bool operator<=(const SymInt& a, scalar_t b); \
|
| 337 |
+
C10_API bool operator>(const SymInt& a, scalar_t b); \
|
| 338 |
+
C10_API bool operator>=(const SymInt& a, scalar_t b); \
|
| 339 |
+
C10_API bool operator==(scalar_t a, const SymInt& b); \
|
| 340 |
+
C10_API bool operator!=(scalar_t a, const SymInt& b); \
|
| 341 |
+
C10_API bool operator<(scalar_t a, const SymInt& b); \
|
| 342 |
+
C10_API bool operator<=(scalar_t a, const SymInt& b); \
|
| 343 |
+
C10_API bool operator>(scalar_t a, const SymInt& b); \
|
| 344 |
+
C10_API bool operator>=(scalar_t a, const SymInt& b);
|
| 345 |
+
|
| 346 |
+
DECLARE_SYMINT_OP_INTONLY(int64_t, SymInt)
|
| 347 |
+
DECLARE_SYMINT_OP_INTONLY(int32_t, SymInt)
|
| 348 |
+
DECLARE_SYMINT_OP_INTONLY(uint64_t, SymInt)
|
| 349 |
+
DECLARE_SYMINT_OP_INTONLY(uint32_t, SymInt)
|
| 350 |
+
DECLARE_SYMINT_OP(int64_t, SymInt)
|
| 351 |
+
DECLARE_SYMINT_OP(int32_t, SymInt) // make sure constants work
|
| 352 |
+
DECLARE_SYMINT_OP(uint64_t, SymInt)
|
| 353 |
+
DECLARE_SYMINT_OP(uint32_t, SymInt)
|
| 354 |
+
DECLARE_SYMINT_OP(double, SymFloat)
|
| 355 |
+
DECLARE_SYMINT_OP(float, SymFloat) // just for completeness
|
| 356 |
+
|
| 357 |
+
// On OSX size_t is different than uint64_t so we have to
|
| 358 |
+
// define it separately
|
| 359 |
+
#if defined(__APPLE__)
|
| 360 |
+
DECLARE_SYMINT_OP_INTONLY(size_t, SymInt)
|
| 361 |
+
DECLARE_SYMINT_OP(size_t, SymInt)
|
| 362 |
+
#endif
|
| 363 |
+
|
| 364 |
+
#undef DECLARE_SYMINT_OP
|
| 365 |
+
|
| 366 |
+
C10_API std::ostream& operator<<(std::ostream& os, const SymInt& s);
|
| 367 |
+
C10_API SymInt operator-(const SymInt& s);
|
| 368 |
+
|
| 369 |
+
inline bool sym_eq(int64_t a, int64_t b) {
|
| 370 |
+
return a == b;
|
| 371 |
+
}
|
| 372 |
+
|
| 373 |
+
inline SymBool sym_eq(const SymInt& a, const SymInt& b) {
|
| 374 |
+
return a.sym_eq(b);
|
| 375 |
+
}
|
| 376 |
+
|
| 377 |
+
inline bool sym_ne(int64_t a, int64_t b) {
|
| 378 |
+
return a != b;
|
| 379 |
+
}
|
| 380 |
+
|
| 381 |
+
inline SymBool sym_ne(const SymInt& a, const SymInt& b) {
|
| 382 |
+
return a.sym_ne(b);
|
| 383 |
+
}
|
| 384 |
+
|
| 385 |
+
inline bool sym_lt(int64_t a, int64_t b) {
|
| 386 |
+
return a < b;
|
| 387 |
+
}
|
| 388 |
+
|
| 389 |
+
inline SymBool sym_lt(const SymInt& a, const SymInt& b) {
|
| 390 |
+
return a.sym_lt(b);
|
| 391 |
+
}
|
| 392 |
+
|
| 393 |
+
inline bool sym_le(int64_t a, int64_t b) {
|
| 394 |
+
return a <= b;
|
| 395 |
+
}
|
| 396 |
+
|
| 397 |
+
inline SymBool sym_le(const SymInt& a, const SymInt& b) {
|
| 398 |
+
return a.sym_le(b);
|
| 399 |
+
}
|
| 400 |
+
|
| 401 |
+
inline bool sym_gt(int64_t a, int64_t b) {
|
| 402 |
+
return a > b;
|
| 403 |
+
}
|
| 404 |
+
|
| 405 |
+
inline SymBool sym_gt(const SymInt& a, const SymInt& b) {
|
| 406 |
+
return a.sym_gt(b);
|
| 407 |
+
}
|
| 408 |
+
|
| 409 |
+
inline bool sym_ge(int64_t a, int64_t b) {
|
| 410 |
+
return a >= b;
|
| 411 |
+
}
|
| 412 |
+
|
| 413 |
+
inline SymBool sym_ge(const SymInt& a, const SymInt& b) {
|
| 414 |
+
return a.sym_ge(b);
|
| 415 |
+
}
|
| 416 |
+
|
| 417 |
+
inline bool definitely_true(
|
| 418 |
+
const c10::SymBool& b,
|
| 419 |
+
const char* file,
|
| 420 |
+
int64_t line) {
|
| 421 |
+
return b.has_hint() && b.guard_bool(file, line);
|
| 422 |
+
}
|
| 423 |
+
|
| 424 |
+
} // namespace c10
|
videochat2/lib/python3.10/site-packages/torch/include/c10/core/SymNodeImpl.h
ADDED
|
@@ -0,0 +1,242 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/macros/Export.h>
|
| 4 |
+
#include <c10/util/ArrayRef.h>
|
| 5 |
+
#include <c10/util/Exception.h>
|
| 6 |
+
#include <c10/util/intrusive_ptr.h>
|
| 7 |
+
#include <cstdint>
|
| 8 |
+
#include <optional>
|
| 9 |
+
#include <ostream>
|
| 10 |
+
#include <string>
|
| 11 |
+
|
| 12 |
+
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wunused-parameter")
|
| 13 |
+
|
| 14 |
+
namespace c10 {
|
| 15 |
+
|
| 16 |
+
class SymNodeImpl;
|
| 17 |
+
using SymNode = c10::intrusive_ptr<SymNodeImpl>;
|
| 18 |
+
|
| 19 |
+
// When you add a method, you also need to edit
|
| 20 |
+
// torch/csrc/jit/python/init.cpp
|
| 21 |
+
// torch/csrc/utils/python_symnode.h
|
| 22 |
+
// c10/core/ConstantSymNodeImpl.h
|
| 23 |
+
// Virtual interface for symbolic scalar nodes backing SymInt / SymFloat /
// SymBool. Concrete backends override the subset of operations they
// support; every default below either raises "NYI" or forwards to a
// legacy method (e.g. float_truediv -> truediv).
//
// NOTE: redundant semicolons after member-function bodies were removed;
// they were inconsistent across methods and trip -Wextra-semi.
class C10_API SymNodeImpl : public c10::intrusive_ptr_target {
 public:
  ~SymNodeImpl() override = default;

  // Checked downcast to a concrete node type; produces a null
  // intrusive_ptr when the dynamic type does not match.
  template <typename T>
  c10::intrusive_ptr<T> dyn_cast() const {
    return c10::intrusive_ptr<T>::reclaim_copy(dynamic_cast<T*>(this));
  }

  // these could be pure virtual when we implement LTC versions
  virtual bool is_int() {
    TORCH_CHECK(false, "NYI");
  }
  virtual bool is_bool() {
    TORCH_CHECK(false, "NYI");
  }
  virtual bool is_float() {
    TORCH_CHECK(false, "NYI");
  }
  virtual bool is_nested_int() const {
    return false;
  }

  // Arithmetic. Binary ops take the right-hand operand; `this` is the
  // left-hand operand.
  virtual SymNode add(const SymNode& other) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode sub(const SymNode& other) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode mul(const SymNode& other) {
    TORCH_CHECK(false, "NYI");
  }
  // NB: legacy, prefer float_truediv or int_truediv
  virtual SymNode truediv(const SymNode& other) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode float_truediv(const SymNode& other) {
    return truediv(other);
  }
  virtual SymNode int_truediv(const SymNode& other) {
    return truediv(other);
  }
  // NB: legacy, prefer float_pow or pow_by_natural
  virtual SymNode pow(const SymNode& other) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode float_pow(const SymNode& other) {
    return pow(other);
  }
  virtual SymNode pow_by_natural(const SymNode& other) {
    return pow(other);
  }
  // NB: legacy, prefer int_floordiv
  virtual SymNode floordiv(const SymNode& other) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode int_floordiv(const SymNode& other) {
    return floordiv(other);
  }
  virtual SymNode mod(const SymNode& other) {
    TORCH_CHECK(false, "NYI");
  }

  // Comparisons produce symbolic bool nodes.
  virtual SymNode eq(const SymNode& other) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode ne(const SymNode& other) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode gt(const SymNode& other) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode lt(const SymNode& other) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode le(const SymNode& other) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode ge(const SymNode& other) {
    TORCH_CHECK(false, "NYI");
  }

  // Unary / rounding ops.
  virtual SymNode ceil() {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode floor() {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode neg() {
    TORCH_CHECK(false, "NYI");
  }

  // Symbolic min/max and boolean connectives.
  virtual SymNode sym_min(const SymNode& other) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode sym_max(const SymNode& other) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode sym_or(const SymNode& other) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode sym_and(const SymNode& other) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode sym_not() {
    TORCH_CHECK(false, "NYI");
  }
  // Symbolic if-then-else: `this` is the condition.
  virtual SymNode sym_ite(const SymNode& then_val, const SymNode& else_val) {
    TORCH_CHECK(false, "NYI");
  }

  // Layout predicates over symbolic sizes/strides.
  // NB: self is ignored here, only the arguments are used
  virtual SymNode is_contiguous(
      ArrayRef<SymNode> sizes,
      ArrayRef<SymNode> strides) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode is_channels_last_contiguous_2d(
      ArrayRef<SymNode> sizes,
      ArrayRef<SymNode> strides) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode is_channels_last_contiguous_3d(
      ArrayRef<SymNode> sizes,
      ArrayRef<SymNode> strides) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode is_channels_last_strides_2d(
      ArrayRef<SymNode> sizes,
      ArrayRef<SymNode> strides) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode is_channels_last_strides_3d(
      ArrayRef<SymNode> sizes,
      ArrayRef<SymNode> strides) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode is_non_overlapping_and_dense(
      ArrayRef<SymNode> sizes,
      ArrayRef<SymNode> strides) {
    TORCH_CHECK(false, "NYI");
  }

  virtual SymNode clone() {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode sym_float() {
    TORCH_CHECK(false, "NYI");
  }

  // Wrap concrete C++ scalars into nodes of the same backend as `this`.
  virtual SymNode wrap_int(int64_t num) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode wrap_float(double num) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode wrap_bool(bool num) {
    TORCH_CHECK(false, "NYI");
  }

  // Guards: force the symbolic value to a concrete one, recording the
  // decision (file/line identify the guard site for diagnostics).
  virtual int64_t guard_int(const char* file, int64_t line) {
    TORCH_CHECK(false, "NYI");
  }
  virtual bool guard_bool(const char* file, int64_t line) {
    TORCH_CHECK(false, "NYI");
  }
  virtual double guard_float(const char* file, int64_t line) {
    TORCH_CHECK(false, "NYI");
  }
  virtual bool guard_size_oblivious(const char* file, int64_t line) {
    // No improvement for unbacked SymBools by default, replace this
    // with a better implementation!
    return guard_bool(file, line);
  }
  virtual bool expect_true(const char* file, int64_t line) {
    // No improvement for unbacked SymBools by default, replace this
    // with a better implementation!
    return guard_bool(file, line);
  }
  virtual bool expect_size(const char* file, int64_t line) {
    // No improvement for unbacked SymInts by default, replace this
    // with a better implementation!
    return ge(wrap_int(0))->guard_bool(file, line);
  }

  // Concrete extraction without recording a guard.
  virtual int64_t int_() {
    TORCH_CHECK(false, "NYI");
  }
  virtual bool bool_() {
    TORCH_CHECK(false, "NYI");
  }
  virtual bool has_hint() {
    TORCH_CHECK(false, "NYI");
  }
  virtual std::string str() {
    TORCH_CHECK(false, "NYI");
  }
  virtual std::string _graph_repr() {
    return str();
  }

  // Optional-valued introspection; nullopt means "not that kind of node".
  virtual std::optional<int64_t> nested_int() {
    return std::nullopt;
  }
  virtual std::optional<int64_t> nested_int_coeff() {
    return std::nullopt;
  }
  virtual std::optional<int64_t> constant_int() {
    return std::nullopt;
  }
  virtual std::optional<bool> constant_bool() {
    return std::nullopt;
  }
  virtual std::optional<int64_t> maybe_as_int() {
    return std::nullopt;
  }
  virtual bool is_constant() {
    return false;
  }
  virtual bool is_symbolic() {
    return true;
  }

  // Streams str() into os; unusual member-operator form is kept for
  // interface compatibility.
  std::ostream& operator<<(std::ostream& os) {
    os << str();
    return os;
  }
};
|
| 240 |
+
|
| 241 |
+
} // namespace c10
|
| 242 |
+
C10_DIAGNOSTIC_POP()
|
videochat2/lib/python3.10/site-packages/torch/include/c10/core/WrapDimMinimal.h
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/core/SymInt.h>
|
| 4 |
+
#include <c10/macros/Export.h>
|
| 5 |
+
#include <c10/macros/Macros.h>
|
| 6 |
+
#include <cstdint>
|
| 7 |
+
#include <utility>
|
| 8 |
+
|
| 9 |
+
namespace c10 {
|
| 10 |
+
|
| 11 |
+
namespace detail {
|
| 12 |
+
// This template can only be specialized at int64_t and c10::SymInt;
|
| 13 |
+
// you'll get linker errors otherwise
|
| 14 |
+
template <typename T>
|
| 15 |
+
C10_API T maybe_wrap_dim_slow(T dim, T dim_post_expr, bool wrap_scalar);
|
| 16 |
+
} // namespace detail
|
| 17 |
+
|
| 18 |
+
// Wrap a possibly-negative dimension index `dim` into [0, dim_post_expr):
// negative dims count from the end (Python-style). T is int64_t or
// c10::SymInt (the slow path is only specialized for those two types).
// `wrap_scalar` presumably permits wrapping for 0-dim tensors — handled
// entirely in the slow path; confirm in maybe_wrap_dim_slow.
template <typename T>
T _maybe_wrap_dim(T dim, T dim_post_expr, bool wrap_scalar = true) {
  // Inline the fast paths
  if (C10_LIKELY(dim_post_expr * -1 <= dim && dim < dim_post_expr)) {
    // For SymInts, we want an explicit control flow to trigger a guard, so we
    // may as well branch too.
    if (dim < 0) {
      return dim + dim_post_expr;
    }
    return dim;
  }
  // Check edge-cases out-of-line (wrapping scalars and out-of-bounds errors)
  return c10::detail::maybe_wrap_dim_slow<T>(
      std::move(dim), std::move(dim_post_expr), wrap_scalar);
}
|
| 33 |
+
|
| 34 |
+
// Concrete-integer overload: forwards to the shared template above.
inline int64_t maybe_wrap_dim(
    int64_t dim,
    int64_t dim_post_expr,
    bool wrap_scalar = true) {
  return _maybe_wrap_dim(dim, dim_post_expr, wrap_scalar);
}
|
| 40 |
+
|
| 41 |
+
// Symbolic-integer overload: takes SymInt by value and moves it through,
// since SymInt may own a refcounted SymNode.
inline c10::SymInt maybe_wrap_dim(
    c10::SymInt dim,
    c10::SymInt dim_post_expr,
    bool wrap_scalar = true) {
  return _maybe_wrap_dim(std::move(dim), std::move(dim_post_expr), wrap_scalar);
}
|
| 47 |
+
|
| 48 |
+
} // namespace c10
|
videochat2/lib/python3.10/site-packages/torch/include/c10/core/impl/COW.h
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/macros/Macros.h>
|
| 4 |
+
#include <c10/util/intrusive_ptr.h>
|
| 5 |
+
|
| 6 |
+
namespace c10 {
|
| 7 |
+
struct StorageImpl;
|
| 8 |
+
class DataPtr;
|
| 9 |
+
}; // namespace c10
|
| 10 |
+
|
| 11 |
+
namespace c10::impl::cow {
|
| 12 |
+
|
| 13 |
+
// Creates a Copy-on-write (COW) clone of the given storage. This will also
|
| 14 |
+
// convert the given storage into a COW storage if it is not COW already.
|
| 15 |
+
//
|
| 16 |
+
// Converting the storage into a COW storage will not be successful if the
|
| 17 |
+
// storage's DataPtr has some context (`DataPtr::get_context()`) which is not
|
| 18 |
+
// equal to the data pointer (`DataPtr::get()`). In this case, a nullptr is
|
| 19 |
+
// returned.
|
| 20 |
+
C10_API c10::intrusive_ptr<StorageImpl> lazy_clone_storage(
|
| 21 |
+
StorageImpl& storage);
|
| 22 |
+
|
| 23 |
+
// Check if a storage has a simple DataPtr with no abnormal context
|
| 24 |
+
C10_API bool has_simple_data_ptr(const c10::StorageImpl& storage);
|
| 25 |
+
|
| 26 |
+
// Check if a DataPtr is COW
|
| 27 |
+
C10_API bool is_cow_data_ptr(const c10::DataPtr& data_ptr);
|
| 28 |
+
|
| 29 |
+
// Eagerly copies a COW storage's data, turning it into a non-COW storage.
|
| 30 |
+
C10_API void materialize_cow_storage(StorageImpl& storage);
|
| 31 |
+
|
| 32 |
+
} // namespace c10::impl::cow
|
videochat2/lib/python3.10/site-packages/torch/include/c10/core/impl/GPUTrace.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/core/impl/PyInterpreter.h>
|
| 4 |
+
|
| 5 |
+
namespace c10::impl {
|
| 6 |
+
|
| 7 |
+
// Holds the (single) Python interpreter registered for GPU tracing.
// NOTE(review): the tracing semantics live in the out-of-line set_trace
// definition; the comments here describe only what this header shows.
struct C10_API GPUTrace {
  // On the x86 architecture the atomic operations are lock-less.
  static std::atomic<const PyInterpreter*> gpuTraceState;

  // When PyTorch migrates to C++20, this should be changed to an atomic flag.
  // Currently, the access to this variable is not synchronized, on the basis
  // that it will only be flipped once and by the first interpreter that
  // accesses it.
  static bool haveState;

  // This function will only register the first interpreter that tries to invoke
  // it. For all of the next ones it will be a no-op.
  static void set_trace(const PyInterpreter*);

  // Fast-path accessor: returns nullptr until an interpreter has been
  // registered. The acquire load presumably pairs with a release store
  // in set_trace — confirm in the .cpp.
  static const PyInterpreter* get_trace() {
    if (!haveState)
      return nullptr;
    return gpuTraceState.load(std::memory_order_acquire);
  }
};
|
| 27 |
+
|
| 28 |
+
} // namespace c10::impl
|
videochat2/lib/python3.10/site-packages/torch/include/c10/core/impl/SizesAndStrides.h
ADDED
|
@@ -0,0 +1,315 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <cstring>

#include <c10/macros/Macros.h>
#include <c10/util/ArrayRef.h>
#include <c10/util/SmallVector.h>

#define C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE 5
|
| 11 |
+
|
| 12 |
+
namespace c10::impl {
|
| 13 |
+
|
| 14 |
+
// Packed container for TensorImpl sizes and strides.
|
| 15 |
+
// This design improves on the previous approach of using a pair of
|
| 16 |
+
// c10::SmallVector<int64_t, 5> by specializing for the operations we
|
| 17 |
+
// actually use and enforcing that the number of sizes is the same as
|
| 18 |
+
// the number of strides. The memory layout is as follows:
|
| 19 |
+
//
|
| 20 |
+
// 1 size_t for the size
|
| 21 |
+
// 5 eightbytes of inline sizes and 5 eightbytes of inline strides, OR pointer
|
| 22 |
+
// to out-of-line array
|
| 23 |
+
class C10_API SizesAndStrides {
|
| 24 |
+
public:
|
| 25 |
+
// TODO: different iterator types for sizes & strides to prevent
|
| 26 |
+
// mixing the two accidentally.
|
| 27 |
+
using sizes_iterator = int64_t*;
|
| 28 |
+
using sizes_const_iterator = const int64_t*;
|
| 29 |
+
using strides_iterator = int64_t*;
|
| 30 |
+
using strides_const_iterator = const int64_t*;
|
| 31 |
+
|
| 32 |
+
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
|
| 33 |
+
SizesAndStrides() {
|
| 34 |
+
size_at_unchecked(0) = 0;
|
| 35 |
+
stride_at_unchecked(0) = 1;
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
~SizesAndStrides() {
|
| 39 |
+
if (C10_UNLIKELY(!isInline())) {
|
| 40 |
+
// NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
|
| 41 |
+
free(outOfLineStorage_);
|
| 42 |
+
}
|
| 43 |
+
}
|
| 44 |
+
|
| 45 |
+
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
|
| 46 |
+
SizesAndStrides(const SizesAndStrides& rhs) : size_(rhs.size_) {
|
| 47 |
+
if (C10_LIKELY(rhs.isInline())) {
|
| 48 |
+
copyDataInline(rhs);
|
| 49 |
+
} else {
|
| 50 |
+
allocateOutOfLineStorage(size_);
|
| 51 |
+
copyDataOutline(rhs);
|
| 52 |
+
}
|
| 53 |
+
}
|
| 54 |
+
|
| 55 |
+
SizesAndStrides& operator=(const SizesAndStrides& rhs) {
|
| 56 |
+
if (this == &rhs) {
|
| 57 |
+
return *this;
|
| 58 |
+
}
|
| 59 |
+
if (C10_LIKELY(rhs.isInline())) {
|
| 60 |
+
if (C10_UNLIKELY(!isInline())) {
|
| 61 |
+
// NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
|
| 62 |
+
free(outOfLineStorage_);
|
| 63 |
+
}
|
| 64 |
+
copyDataInline(rhs);
|
| 65 |
+
} else {
|
| 66 |
+
if (isInline()) {
|
| 67 |
+
allocateOutOfLineStorage(rhs.size_);
|
| 68 |
+
} else {
|
| 69 |
+
resizeOutOfLineStorage(rhs.size_);
|
| 70 |
+
}
|
| 71 |
+
copyDataOutline(rhs);
|
| 72 |
+
}
|
| 73 |
+
size_ = rhs.size_;
|
| 74 |
+
return *this;
|
| 75 |
+
}
|
| 76 |
+
|
| 77 |
+
// Move from rhs. rhs.size() == 0 afterwards.
|
| 78 |
+
SizesAndStrides(SizesAndStrides&& rhs) noexcept : size_(rhs.size_) {
|
| 79 |
+
if (C10_LIKELY(isInline())) {
|
| 80 |
+
memcpy(inlineStorage_, rhs.inlineStorage_, sizeof(inlineStorage_));
|
| 81 |
+
} else {
|
| 82 |
+
outOfLineStorage_ = rhs.outOfLineStorage_;
|
| 83 |
+
rhs.outOfLineStorage_ = nullptr;
|
| 84 |
+
}
|
| 85 |
+
|
| 86 |
+
rhs.size_ = 0;
|
| 87 |
+
}
|
| 88 |
+
|
| 89 |
+
// Move from rhs. rhs.size() == 0 afterwards.
|
| 90 |
+
SizesAndStrides& operator=(SizesAndStrides&& rhs) noexcept {
|
| 91 |
+
if (this == &rhs) {
|
| 92 |
+
return *this;
|
| 93 |
+
}
|
| 94 |
+
if (C10_LIKELY(rhs.isInline())) {
|
| 95 |
+
if (C10_UNLIKELY(!isInline())) {
|
| 96 |
+
// NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
|
| 97 |
+
free(outOfLineStorage_);
|
| 98 |
+
}
|
| 99 |
+
copyDataInline(rhs);
|
| 100 |
+
} else {
|
| 101 |
+
// They're outline. We're going to steal their vector.
|
| 102 |
+
if (!isInline()) {
|
| 103 |
+
// NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
|
| 104 |
+
free(outOfLineStorage_);
|
| 105 |
+
}
|
| 106 |
+
outOfLineStorage_ = rhs.outOfLineStorage_;
|
| 107 |
+
rhs.outOfLineStorage_ = nullptr;
|
| 108 |
+
}
|
| 109 |
+
size_ = rhs.size_;
|
| 110 |
+
rhs.size_ = 0;
|
| 111 |
+
|
| 112 |
+
return *this;
|
| 113 |
+
}
|
| 114 |
+
|
| 115 |
+
size_t size() const noexcept {
|
| 116 |
+
return size_;
|
| 117 |
+
}
|
| 118 |
+
|
| 119 |
+
const int64_t* sizes_data() const noexcept {
|
| 120 |
+
if (C10_LIKELY(isInline())) {
|
| 121 |
+
return &inlineStorage_[0];
|
| 122 |
+
} else {
|
| 123 |
+
return &outOfLineStorage_[0];
|
| 124 |
+
}
|
| 125 |
+
}
|
| 126 |
+
|
| 127 |
+
int64_t* sizes_data() noexcept {
|
| 128 |
+
if (C10_LIKELY(isInline())) {
|
| 129 |
+
return &inlineStorage_[0];
|
| 130 |
+
} else {
|
| 131 |
+
return &outOfLineStorage_[0];
|
| 132 |
+
}
|
| 133 |
+
}
|
| 134 |
+
|
| 135 |
+
sizes_const_iterator sizes_begin() const noexcept {
|
| 136 |
+
return sizes_data();
|
| 137 |
+
}
|
| 138 |
+
|
| 139 |
+
sizes_iterator sizes_begin() noexcept {
|
| 140 |
+
return sizes_data();
|
| 141 |
+
}
|
| 142 |
+
|
| 143 |
+
sizes_const_iterator sizes_end() const noexcept {
|
| 144 |
+
return sizes_begin() + size();
|
| 145 |
+
}
|
| 146 |
+
|
| 147 |
+
sizes_iterator sizes_end() noexcept {
|
| 148 |
+
return sizes_begin() + size();
|
| 149 |
+
}
|
| 150 |
+
|
| 151 |
+
IntArrayRef sizes_arrayref() const noexcept {
|
| 152 |
+
return IntArrayRef{sizes_data(), size()};
|
| 153 |
+
}
|
| 154 |
+
|
| 155 |
+
void set_sizes(IntArrayRef newSizes) {
|
| 156 |
+
resize(newSizes.size());
|
| 157 |
+
std::copy(newSizes.begin(), newSizes.end(), sizes_begin());
|
| 158 |
+
}
|
| 159 |
+
|
| 160 |
+
void set_strides(IntArrayRef strides) {
|
| 161 |
+
TORCH_INTERNAL_ASSERT(strides.size() == size());
|
| 162 |
+
std::copy(strides.begin(), strides.end(), strides_begin());
|
| 163 |
+
}
|
| 164 |
+
|
| 165 |
+
const int64_t* strides_data() const noexcept {
|
| 166 |
+
if (C10_LIKELY(isInline())) {
|
| 167 |
+
return &inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE];
|
| 168 |
+
} else {
|
| 169 |
+
return &outOfLineStorage_[size()];
|
| 170 |
+
}
|
| 171 |
+
}
|
| 172 |
+
|
| 173 |
+
int64_t* strides_data() noexcept {
|
| 174 |
+
if (C10_LIKELY(isInline())) {
|
| 175 |
+
return &inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE];
|
| 176 |
+
} else {
|
| 177 |
+
return &outOfLineStorage_[size()];
|
| 178 |
+
}
|
| 179 |
+
}
|
| 180 |
+
|
| 181 |
+
strides_const_iterator strides_begin() const noexcept {
|
| 182 |
+
if (C10_LIKELY(isInline())) {
|
| 183 |
+
return &inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE];
|
| 184 |
+
} else {
|
| 185 |
+
return &outOfLineStorage_[size()];
|
| 186 |
+
}
|
| 187 |
+
}
|
| 188 |
+
|
| 189 |
+
strides_iterator strides_begin() noexcept {
|
| 190 |
+
if (C10_LIKELY(isInline())) {
|
| 191 |
+
return &inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE];
|
| 192 |
+
} else {
|
| 193 |
+
return &outOfLineStorage_[size()];
|
| 194 |
+
}
|
| 195 |
+
}
|
| 196 |
+
|
| 197 |
+
strides_const_iterator strides_end() const noexcept {
|
| 198 |
+
return strides_begin() + size();
|
| 199 |
+
}
|
| 200 |
+
|
| 201 |
+
strides_iterator strides_end() noexcept {
|
| 202 |
+
return strides_begin() + size();
|
| 203 |
+
}
|
| 204 |
+
|
| 205 |
+
IntArrayRef strides_arrayref() const noexcept {
|
| 206 |
+
return IntArrayRef{strides_data(), size()};
|
| 207 |
+
}
|
| 208 |
+
|
| 209 |
+
// Size accessors.
|
| 210 |
+
int64_t size_at(size_t idx) const noexcept {
|
| 211 |
+
assert(idx < size());
|
| 212 |
+
return sizes_data()[idx];
|
| 213 |
+
}
|
| 214 |
+
|
| 215 |
+
int64_t& size_at(size_t idx) noexcept {
|
| 216 |
+
assert(idx < size());
|
| 217 |
+
return sizes_data()[idx];
|
| 218 |
+
}
|
| 219 |
+
|
| 220 |
+
int64_t size_at_unchecked(size_t idx) const noexcept {
|
| 221 |
+
return sizes_data()[idx];
|
| 222 |
+
}
|
| 223 |
+
|
| 224 |
+
int64_t& size_at_unchecked(size_t idx) noexcept {
|
| 225 |
+
return sizes_data()[idx];
|
| 226 |
+
}
|
| 227 |
+
|
| 228 |
+
// Size accessors.
|
| 229 |
+
int64_t stride_at(size_t idx) const noexcept {
|
| 230 |
+
assert(idx < size());
|
| 231 |
+
return strides_data()[idx];
|
| 232 |
+
}
|
| 233 |
+
|
| 234 |
+
int64_t& stride_at(size_t idx) noexcept {
|
| 235 |
+
assert(idx < size());
|
| 236 |
+
return strides_data()[idx];
|
| 237 |
+
}
|
| 238 |
+
|
| 239 |
+
int64_t stride_at_unchecked(size_t idx) const noexcept {
|
| 240 |
+
return strides_data()[idx];
|
| 241 |
+
}
|
| 242 |
+
|
| 243 |
+
int64_t& stride_at_unchecked(size_t idx) noexcept {
|
| 244 |
+
return strides_data()[idx];
|
| 245 |
+
}
|
| 246 |
+
|
| 247 |
+
void resize(size_t newSize) {
|
| 248 |
+
const auto oldSize = size();
|
| 249 |
+
if (newSize == oldSize) {
|
| 250 |
+
return;
|
| 251 |
+
}
|
| 252 |
+
if (C10_LIKELY(
|
| 253 |
+
newSize <= C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE && isInline())) {
|
| 254 |
+
if (oldSize < newSize) {
|
| 255 |
+
const auto bytesToZero =
|
| 256 |
+
(newSize - oldSize) * sizeof(inlineStorage_[0]);
|
| 257 |
+
memset(&inlineStorage_[oldSize], 0, bytesToZero);
|
| 258 |
+
memset(
|
| 259 |
+
&inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE + oldSize],
|
| 260 |
+
0,
|
| 261 |
+
bytesToZero);
|
| 262 |
+
}
|
| 263 |
+
size_ = newSize;
|
| 264 |
+
} else {
|
| 265 |
+
resizeSlowPath(newSize, oldSize);
|
| 266 |
+
}
|
| 267 |
+
}
|
| 268 |
+
|
| 269 |
+
void resizeSlowPath(size_t newSize, size_t oldSize);
|
| 270 |
+
|
| 271 |
+
private:
|
| 272 |
+
bool isInline() const noexcept {
|
| 273 |
+
return size_ <= C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE;
|
| 274 |
+
}
|
| 275 |
+
|
| 276 |
+
void copyDataInline(const SizesAndStrides& rhs) {
|
| 277 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(rhs.isInline());
|
| 278 |
+
memcpy(inlineStorage_, rhs.inlineStorage_, sizeof(inlineStorage_));
|
| 279 |
+
}
|
| 280 |
+
|
| 281 |
+
static size_t storageBytes(size_t size) noexcept {
|
| 282 |
+
return size * 2 * sizeof(int64_t);
|
| 283 |
+
}
|
| 284 |
+
|
| 285 |
+
void allocateOutOfLineStorage(size_t size) {
|
| 286 |
+
// NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
|
| 287 |
+
outOfLineStorage_ = static_cast<int64_t*>(malloc(storageBytes(size)));
|
| 288 |
+
TORCH_CHECK(
|
| 289 |
+
outOfLineStorage_,
|
| 290 |
+
"Could not allocate memory for Tensor SizesAndStrides!");
|
| 291 |
+
}
|
| 292 |
+
|
| 293 |
+
void resizeOutOfLineStorage(size_t newSize) {
|
| 294 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!isInline());
|
| 295 |
+
outOfLineStorage_ = static_cast<int64_t*>(
|
| 296 |
+
// NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
|
| 297 |
+
realloc(outOfLineStorage_, storageBytes(newSize)));
|
| 298 |
+
TORCH_CHECK(
|
| 299 |
+
outOfLineStorage_,
|
| 300 |
+
"Could not allocate memory for Tensor SizesAndStrides!");
|
| 301 |
+
}
|
| 302 |
+
|
| 303 |
+
void copyDataOutline(const SizesAndStrides& rhs) noexcept {
|
| 304 |
+
memcpy(outOfLineStorage_, rhs.outOfLineStorage_, storageBytes(rhs.size_));
|
| 305 |
+
}
|
| 306 |
+
|
| 307 |
+
size_t size_{1};
|
| 308 |
+
union {
|
| 309 |
+
int64_t* outOfLineStorage_;
|
| 310 |
+
// NOLINTNEXTLINE(*c-array*)
|
| 311 |
+
int64_t inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE * 2]{};
|
| 312 |
+
};
|
| 313 |
+
};
|
| 314 |
+
|
| 315 |
+
} // namespace c10::impl
|
videochat2/lib/python3.10/site-packages/torch/include/c10/util/AbortHandler.h
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include <c10/macros/Macros.h>
|
| 2 |
+
#include <c10/util/Backtrace.h>
|
| 3 |
+
#include <c10/util/env.h>
|
| 4 |
+
#include <cstdlib>
|
| 5 |
+
#include <exception>
|
| 6 |
+
#include <iostream>
|
| 7 |
+
#include <mutex>
|
| 8 |
+
#include <optional>
|
| 9 |
+
|
| 10 |
+
namespace c10 {
|
| 11 |
+
// Singleton that installs a std::terminate handler exactly once and
// restores the previous handler on destruction (only if ours is still
// the active one). On Windows the instance is thread_local, so each
// thread installs/restores independently.
class AbortHandlerHelper {
 public:
  static AbortHandlerHelper& getInstance() {
#ifdef _WIN32
    thread_local
#endif // _WIN32
        static AbortHandlerHelper instance;
    return instance;
  }

  // Install `handler` as the terminate handler. Idempotent: only the
  // first call per instance takes effect; the previously installed
  // handler is saved so terminate_handler() can chain to it.
  void set(std::terminate_handler handler) {
    std::lock_guard<std::mutex> lk(mutex);
    if (!inited) {
      prev = std::set_terminate(handler);
      // Record what we installed so ~AbortHandlerHelper can tell whether
      // someone else replaced it afterwards.
      curr = std::get_terminate();
      inited = true;
    }
  }

  std::terminate_handler getPrev() const {
    return prev;
  }

 private:
  std::terminate_handler prev = nullptr;
  std::terminate_handler curr = nullptr;
  bool inited = false;
  std::mutex mutex;
  AbortHandlerHelper() = default;
  ~AbortHandlerHelper() {
    // Only restore the handler if we are the current one
    if (inited && curr == std::get_terminate()) {
      std::set_terminate(prev);
    }
  }

 public:
  // Non-copyable singleton.
  AbortHandlerHelper(AbortHandlerHelper const&) = delete;
  void operator=(AbortHandlerHelper const&) = delete;
};
|
| 51 |
+
|
| 52 |
+
namespace detail {
|
| 53 |
+
// Terminate handler installed by set_terminate_handler(): prints a
// notice and a backtrace to stdout, then chains to the previously
// installed handler, falling back to std::abort().
C10_ALWAYS_INLINE void terminate_handler() {
  std::cout << "Unhandled exception caught in c10/util/AbortHandler.h" << '\n';
  auto backtrace = get_backtrace();
  std::cout << backtrace << '\n' << std::flush;
  auto prev_handler = AbortHandlerHelper::getInstance().getPrev();
  if (prev_handler) {
    prev_handler();
  } else {
    std::abort();
  }
}
|
| 64 |
+
} // namespace detail
|
| 65 |
+
|
| 66 |
+
// Installs detail::terminate_handler as the process terminate handler
// when enabled. Enabled by default on Windows; the TORCH_CUSTOM_TERMINATE
// environment variable overrides the platform default in either
// direction.
C10_ALWAYS_INLINE void set_terminate_handler() {
  // On Windows it is enabled by default based on
  // https://github.com/pytorch/pytorch/pull/50320#issuecomment-763147062
#ifdef _WIN32
  bool use_custom_terminate = true;
#else
  bool use_custom_terminate = false;
#endif // _WIN32
  const auto result = c10::utils::check_env("TORCH_CUSTOM_TERMINATE");
  if (result.has_value()) {
    use_custom_terminate = result.value();
  }
  if (use_custom_terminate) {
    AbortHandlerHelper::getInstance().set(detail::terminate_handler);
  }
}
|
| 81 |
+
} // namespace c10
|
videochat2/lib/python3.10/site-packages/torch/include/c10/util/ApproximateClock.h
ADDED
|
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright 2023-present Facebook. All Rights Reserved.
|
| 2 |
+
|
| 3 |
+
#pragma once
|
| 4 |
+
|
| 5 |
+
#include <c10/macros/Export.h>
|
| 6 |
+
#include <array>
|
| 7 |
+
#include <chrono>
|
| 8 |
+
#include <cstddef>
|
| 9 |
+
#include <cstdint>
|
| 10 |
+
#include <ctime>
|
| 11 |
+
#include <functional>
|
| 12 |
+
#include <type_traits>
|
| 13 |
+
|
| 14 |
+
#if defined(C10_IOS) && defined(C10_MOBILE)
|
| 15 |
+
#include <sys/time.h> // for gettimeofday()
|
| 16 |
+
#endif
|
| 17 |
+
|
| 18 |
+
#if defined(__i386__) || defined(__x86_64__) || defined(__amd64__)
|
| 19 |
+
#define C10_RDTSC
|
| 20 |
+
#if defined(_MSC_VER)
|
| 21 |
+
#include <intrin.h>
|
| 22 |
+
#elif defined(__CUDACC__) || defined(__HIPCC__)
|
| 23 |
+
#undef C10_RDTSC
|
| 24 |
+
#elif defined(__clang__)
|
| 25 |
+
// `__rdtsc` is available by default.
|
| 26 |
+
// NB: This has to be first, because Clang will also define `__GNUC__`
|
| 27 |
+
#elif defined(__GNUC__)
|
| 28 |
+
#include <x86intrin.h>
|
| 29 |
+
#else
|
| 30 |
+
#undef C10_RDTSC
|
| 31 |
+
#endif
|
| 32 |
+
#endif
|
| 33 |
+
|
| 34 |
+
namespace c10 {
|
| 35 |
+
|
| 36 |
+
using time_t = int64_t;
|
| 37 |
+
using steady_clock_t = std::conditional_t<
|
| 38 |
+
std::chrono::high_resolution_clock::is_steady,
|
| 39 |
+
std::chrono::high_resolution_clock,
|
| 40 |
+
std::chrono::steady_clock>;
|
| 41 |
+
|
| 42 |
+
// Wall-clock time in nanoseconds since the Unix epoch (system_clock).
inline time_t getTimeSinceEpoch() {
  using std::chrono::duration_cast;
  using std::chrono::nanoseconds;
  using std::chrono::system_clock;
  return duration_cast<nanoseconds>(system_clock::now().time_since_epoch())
      .count();
}
|
| 46 |
+
|
| 47 |
+
// Current time in nanoseconds. The clock source is platform-dependent:
// gettimeofday on iOS, std::chrono steady clock on Windows/macOS, and
// clock_gettime elsewhere (CLOCK_MONOTONIC when allow_monotonic is set,
// else CLOCK_REALTIME). Values from different platforms/modes are not
// comparable to each other.
inline time_t getTime(bool allow_monotonic = false) {
#if defined(C10_IOS) && defined(C10_MOBILE)
  // clock_gettime is only available on iOS 10.0 or newer. Unlike OS X, iOS
  // can't rely on CLOCK_REALTIME, as it is defined no matter if clock_gettime
  // is implemented or not
  struct timeval now;
  gettimeofday(&now, NULL);
  // tv_usec is microseconds: scale by 1000 to nanoseconds.
  return static_cast<time_t>(now.tv_sec) * 1000000000 +
      static_cast<time_t>(now.tv_usec) * 1000;
#elif defined(_WIN32) || defined(__MACH__)
  return std::chrono::duration_cast<std::chrono::nanoseconds>(
             steady_clock_t::now().time_since_epoch())
      .count();
#else
  // clock_gettime is *much* faster than std::chrono implementation on Linux
  struct timespec t {};
  auto mode = CLOCK_REALTIME;
  if (allow_monotonic) {
    mode = CLOCK_MONOTONIC;
  }
  clock_gettime(mode, &t);
  return static_cast<time_t>(t.tv_sec) * 1000000000 +
      static_cast<time_t>(t.tv_nsec);
#endif
}
|
| 72 |
+
|
| 73 |
+
// We often do not need to capture true wall times. If a fast mechanism such
|
| 74 |
+
// as TSC is available we can use that instead and convert back to epoch time
|
| 75 |
+
// during post processing. This greatly reduce the clock's contribution to
|
| 76 |
+
// profiling.
|
| 77 |
+
// http://btorpey.github.io/blog/2014/02/18/clock-sources-in-linux/
|
| 78 |
+
// https://quick-bench.com/q/r8opkkGZSJMu9wM_XTbDouq-0Io
|
| 79 |
+
// TODO: We should use
|
| 80 |
+
// `https://github.com/google/benchmark/blob/main/src/cycleclock.h`
|
| 81 |
+
// Fast timestamp for profiling: raw TSC ticks where RDTSC is usable
// (C10_RDTSC, set up at the top of this header), otherwise a getTime()
// fallback. NB: the return type differs per platform — uint64_t for the
// TSC read, int64_t from getTime() — which is why this returns `auto`;
// see the static_assert on approx_time_t below.
inline auto getApproximateTime() {
#if defined(C10_RDTSC)
  return static_cast<uint64_t>(__rdtsc());
#else
  return getTime();
#endif
}
|
| 88 |
+
|
| 89 |
+
using approx_time_t = decltype(getApproximateTime());
|
| 90 |
+
static_assert(
|
| 91 |
+
std::is_same_v<approx_time_t, int64_t> ||
|
| 92 |
+
std::is_same_v<approx_time_t, uint64_t>,
|
| 93 |
+
"Expected either int64_t (`getTime`) or uint64_t (some TSC reads).");
|
| 94 |
+
|
| 95 |
+
// Convert `getCount` results to Nanoseconds since unix epoch.
|
| 96 |
+
class C10_API ApproximateClockToUnixTimeConverter final {
 public:
  // NOTE(review): implementation is out-of-line; presumably snapshots
  // `start_times_` at construction — confirm in ApproximateClock.cpp.
  ApproximateClockToUnixTimeConverter();
  // Returns a function that maps an approx_time_t reading (e.g. a raw
  // TSC value from getApproximateTime()) to nanoseconds since the Unix
  // epoch, calibrated against the pairs measured by this converter.
  std::function<time_t(approx_time_t)> makeConverter();

  // A wall-clock timestamp and an approximate-clock reading taken
  // (as nearly as possible) at the same instant.
  struct UnixAndApproximateTimePair {
    time_t t_;
    approx_time_t approx_t_;
  };
  static UnixAndApproximateTimePair measurePair();

 private:
  // Number of calibration samples; odd, presumably so a median is
  // well-defined — confirm against the .cpp implementation.
  static constexpr size_t replicates = 1001;
  using time_pairs = std::array<UnixAndApproximateTimePair, replicates>;
  time_pairs measurePairs();

  // Calibration pairs captured when the converter was constructed.
  time_pairs start_times_;
};
|
| 114 |
+
|
| 115 |
+
} // namespace c10
|
videochat2/lib/python3.10/site-packages/torch/include/c10/util/BFloat16.h
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// Defines the bloat16 type (brain floating-point). This representation uses
|
| 4 |
+
// 1 bit for the sign, 8 bits for the exponent and 7 bits for the mantissa.
|
| 5 |
+
|
| 6 |
+
#include <c10/macros/Macros.h>
|
| 7 |
+
#include <cmath>
|
| 8 |
+
#include <cstdint>
|
| 9 |
+
#include <cstring>
|
| 10 |
+
#include <iosfwd>
|
| 11 |
+
#include <ostream>
|
| 12 |
+
|
| 13 |
+
#if defined(__CUDACC__) && !defined(USE_ROCM)
|
| 14 |
+
#include <cuda_bf16.h>
|
| 15 |
+
#endif
|
| 16 |
+
#if defined(__HIPCC__) && defined(USE_ROCM)
|
| 17 |
+
#include <hip/hip_bf16.h>
|
| 18 |
+
#endif
|
| 19 |
+
|
| 20 |
+
#if defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS)
|
| 21 |
+
#if defined(CL_SYCL_LANGUAGE_VERSION)
|
| 22 |
+
#include <CL/sycl.hpp> // for SYCL 1.2.1
|
| 23 |
+
#else
|
| 24 |
+
#include <sycl/sycl.hpp> // for SYCL 2020
|
| 25 |
+
#endif
|
| 26 |
+
#include <ext/oneapi/bfloat16.hpp>
|
| 27 |
+
#endif
|
| 28 |
+
|
| 29 |
+
namespace c10 {
|
| 30 |
+
|
| 31 |
+
namespace detail {
|
| 32 |
+
// Reinterprets a bfloat16 bit pattern as the float32 whose upper 16 bits
// are `src` and whose lower 16 bits are zero. bfloat16 -> float is exact,
// so no rounding is involved.
inline C10_HOST_DEVICE float f32_from_bits(uint16_t src) {
  float res = 0;
  uint32_t tmp = src;
  tmp <<= 16;

#if defined(USE_ROCM)
  float* tempRes;

  // We should be using memcpy in order to respect the strict aliasing rule
  // but it fails in the HIP environment.
  tempRes = reinterpret_cast<float*>(&tmp);
  res = *tempRes;
#else
  std::memcpy(&res, &tmp, sizeof(tmp));
#endif

  return res;
}
|
| 50 |
+
|
| 51 |
+
// Extracts a bfloat16 bit pattern from a float32 by keeping only the
// upper 16 bits (truncation toward zero in the mantissa). For
// round-to-nearest-even conversion use round_to_nearest_even() below.
inline C10_HOST_DEVICE uint16_t bits_from_f32(float src) {
  uint32_t res = 0;

#if defined(USE_ROCM)
  // We should be using memcpy in order to respect the strict aliasing rule
  // but it fails in the HIP environment.
  uint32_t* tempRes = reinterpret_cast<uint32_t*>(&src);
  res = *tempRes;
#else
  std::memcpy(&res, &src, sizeof(res));
#endif

  return res >> 16;
}
|
| 65 |
+
|
| 66 |
+
// Converts a float32 to the nearest bfloat16 bit pattern, rounding ties
// to even. NaN inputs map to the canonical quiet NaN pattern 0x7FC0.
inline C10_HOST_DEVICE uint16_t round_to_nearest_even(float src) {
#if defined(USE_ROCM)
  // HIP device code: self-inequality is the portable NaN test there.
  if (src != src) {
#elif defined(_MSC_VER)
  if (isnan(src)) {
#else
  if (std::isnan(src)) {
#endif
    return UINT16_C(0x7FC0);
  } else {
    // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
    union {
      uint32_t U32; // NOLINT(facebook-hte-BadMemberName)
      float F32; // NOLINT(facebook-hte-BadMemberName)
    };

    F32 = src;
    // Adding 0x7FFF, plus 1 when the bit that becomes the bfloat16 LSB
    // is set, implements round-half-to-even on the truncated upper half.
    uint32_t rounding_bias = ((U32 >> 16) & 1) + UINT32_C(0x7FFF);
    return static_cast<uint16_t>((U32 + rounding_bias) >> 16);
  }
}
|
| 87 |
+
} // namespace detail
|
| 88 |
+
|
| 89 |
+
// 16-bit brain floating point: 1 sign bit, 8 exponent bits, 7 mantissa
// bits — the same exponent range as float32 with reduced precision.
// Conversions to/from float (and the operators) are defined in
// BFloat16-inl.h, included at the bottom of this header.
struct alignas(2) BFloat16 {
  // Raw bit pattern of the value.
  uint16_t x;

  // HIP wants __host__ __device__ tag, CUDA does not
#if defined(USE_ROCM)
  C10_HOST_DEVICE BFloat16() = default;
#else
  BFloat16() = default;
#endif

  // Tag type selecting the raw-bits constructor below.
  struct from_bits_t {};
  static constexpr C10_HOST_DEVICE from_bits_t from_bits() {
    return from_bits_t();
  }

  // Constructs directly from a 16-bit pattern — no float conversion:
  //   BFloat16(0x7FC0, BFloat16::from_bits())
  constexpr C10_HOST_DEVICE BFloat16(unsigned short bits, from_bits_t)
      : x(bits) {}
  /* implicit */ inline C10_HOST_DEVICE BFloat16(float value);
  inline C10_HOST_DEVICE operator float() const;

  // Interop with the vendor bfloat16 types when compiling device code.
#if defined(__CUDACC__) && !defined(USE_ROCM)
  inline C10_HOST_DEVICE BFloat16(const __nv_bfloat16& value);
  explicit inline C10_HOST_DEVICE operator __nv_bfloat16() const;
#endif
#if defined(__HIPCC__) && defined(USE_ROCM)
  inline C10_HOST_DEVICE BFloat16(const __hip_bfloat16& value);
  explicit inline C10_HOST_DEVICE operator __hip_bfloat16() const;
#endif

#if defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS)
  inline C10_HOST_DEVICE BFloat16(const sycl::ext::oneapi::bfloat16& value);
  explicit inline C10_HOST_DEVICE operator sycl::ext::oneapi::bfloat16() const;
#endif
};
|
| 123 |
+
|
| 124 |
+
/// Streams a BFloat16 by widening it to float (an exact conversion) and
/// printing that value with the stream's current formatting flags.
C10_API inline std::ostream& operator<<(
    std::ostream& out,
    const BFloat16& value) {
  // Named cast instead of the C-style `(float)value`: greppable and
  // intent-revealing, with identical behavior.
  out << static_cast<float>(value);
  return out;
}
|
| 130 |
+
|
| 131 |
+
} // namespace c10
|
| 132 |
+
|
| 133 |
+
#include <c10/util/BFloat16-inl.h> // IWYU pragma: keep
|
videochat2/lib/python3.10/site-packages/torch/include/c10/util/Bitset.h
ADDED
|
@@ -0,0 +1,116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <cstddef>
|
| 4 |
+
#if defined(_MSC_VER)
|
| 5 |
+
#include <intrin.h>
|
| 6 |
+
#endif
|
| 7 |
+
|
| 8 |
+
namespace c10::utils {
|
| 9 |
+
|
| 10 |
+
/**
|
| 11 |
+
* This is a simple bitset class with sizeof(long long int) bits.
|
| 12 |
+
* You can set bits, unset bits, query bits by index,
|
| 13 |
+
* and query for the first set bit.
|
| 14 |
+
* Before using this class, please also take a look at std::bitset,
|
| 15 |
+
* which has more functionality and is more generic. It is probably
|
| 16 |
+
* a better fit for your use case. The sole reason for c10::utils::bitset
|
| 17 |
+
* to exist is that std::bitset misses a find_first_set() method.
|
| 18 |
+
*/
|
| 19 |
+
struct bitset final {
 private:
#if defined(_MSC_VER)
  // MSVCs _BitScanForward64 expects int64_t
  using bitset_type = int64_t;
#else
  // POSIX ffsll expects long long int
  using bitset_type = long long int;
#endif
 public:
  // Total number of addressable bits (64 on all supported targets).
  static constexpr size_t NUM_BITS() {
    return 8 * sizeof(bitset_type);
  }

  constexpr bitset() noexcept = default;
  constexpr bitset(const bitset&) noexcept = default;
  constexpr bitset(bitset&&) noexcept = default;
  // there is an issure for gcc 5.3.0 when define default function as constexpr
  // see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=68754.
  bitset& operator=(const bitset&) noexcept = default;
  bitset& operator=(bitset&&) noexcept = default;

  // Sets the bit at `index` (0-based). Precondition: index < NUM_BITS().
  constexpr void set(size_t index) noexcept {
    // Form the mask in unsigned arithmetic: shifting a *signed* 1 into
    // the sign bit (index == 63) was UB before C++20. The conversion back
    // to the signed storage type is modular on all supported compilers.
    bitset_ |= static_cast<bitset_type>(1ULL << index);
  }

  // Clears the bit at `index`. Precondition: index < NUM_BITS().
  constexpr void unset(size_t index) noexcept {
    bitset_ &= static_cast<bitset_type>(~(1ULL << index));
  }

  // True iff the bit at `index` is set. Precondition: index < NUM_BITS().
  constexpr bool get(size_t index) const noexcept {
    return (static_cast<unsigned long long>(bitset_) & (1ULL << index)) != 0;
  }

  constexpr bool is_entirely_unset() const noexcept {
    return 0 == bitset_;
  }

  // Call the given functor with the index of each bit that is set
  template <class Func>
  void for_each_set_bit(Func&& func) const {
    bitset cur = *this;
    size_t index = cur.find_first_set();
    while (0 != index) {
      // -1 because find_first_set() is not one-indexed.
      index -= 1;
      func(index);
      cur.unset(index);
      index = cur.find_first_set();
    }
  }

 private:
  // Return the index of the first set bit. The returned index is one-indexed
  // (i.e. if the very first bit is set, this function returns '1'), and a
  // return of '0' means that there was no bit set.
  size_t find_first_set() const {
#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_ARM64))
    unsigned long result;
    bool has_bits_set = (0 != _BitScanForward64(&result, bitset_));
    if (!has_bits_set) {
      return 0;
    }
    return result + 1;
#elif defined(_MSC_VER) && defined(_M_IX86)
    unsigned long result;
    if (static_cast<uint32_t>(bitset_) != 0) {
      bool has_bits_set =
          (0 != _BitScanForward(&result, static_cast<uint32_t>(bitset_)));
      if (!has_bits_set) {
        return 0;
      }
      return result + 1;
    } else {
      bool has_bits_set =
          (0 != _BitScanForward(&result, static_cast<uint32_t>(bitset_ >> 32)));
      if (!has_bits_set) {
        // BUGFIX: this used to return 32, falsely claiming bit 31 was set
        // on an entirely-empty bitset (and making for_each_set_bit spin).
        // Per the contract above, "no bit set" must return 0.
        return 0;
      }
      return result + 33;
    }
#else
    return __builtin_ffsll(bitset_);
#endif
  }

  friend bool operator==(bitset lhs, bitset rhs) noexcept {
    return lhs.bitset_ == rhs.bitset_;
  }

  bitset_type bitset_{0};
};
|
| 111 |
+
|
| 112 |
+
// Inequality, defined in terms of the friend operator== declared inside
// bitset (found via ADL).
inline bool operator!=(bitset lhs, bitset rhs) noexcept {
  const bool same = (lhs == rhs);
  return !same;
}
|
| 115 |
+
|
| 116 |
+
} // namespace c10::utils
|
videochat2/lib/python3.10/site-packages/torch/include/c10/util/ConstexprCrc.h
ADDED
|
@@ -0,0 +1,130 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/util/IdWrapper.h>
|
| 4 |
+
#include <c10/util/string_view.h>
|
| 5 |
+
#include <cstddef>
|
| 6 |
+
#include <cstdint>
|
| 7 |
+
|
| 8 |
+
namespace c10::util {
|
| 9 |
+
|
| 10 |
+
namespace detail {
|
| 11 |
+
// 256-entry byte-at-a-time lookup table for the reflected CRC-64 used by
// crc64impl below (Jones coefficients; see the comment on crc64()).
// Entry i is the CRC contribution of the single input byte i.
// NOLINTNEXTLINE(*c-arrays*)
constexpr uint64_t crc64_table[] = {
    0x0000000000000000, 0x7ad870c830358979, 0xf5b0e190606b12f2,
    0x8f689158505e9b8b, 0xc038e5739841b68f, 0xbae095bba8743ff6,
    0x358804e3f82aa47d, 0x4f50742bc81f2d04, 0xab28ecb46814fe75,
    0xd1f09c7c5821770c, 0x5e980d24087fec87, 0x24407dec384a65fe,
    0x6b1009c7f05548fa, 0x11c8790fc060c183, 0x9ea0e857903e5a08,
    0xe478989fa00bd371, 0x7d08ff3b88be6f81, 0x07d08ff3b88be6f8,
    0x88b81eabe8d57d73, 0xf2606e63d8e0f40a, 0xbd301a4810ffd90e,
    0xc7e86a8020ca5077, 0x4880fbd87094cbfc, 0x32588b1040a14285,
    0xd620138fe0aa91f4, 0xacf86347d09f188d, 0x2390f21f80c18306,
    0x594882d7b0f40a7f, 0x1618f6fc78eb277b, 0x6cc0863448deae02,
    0xe3a8176c18803589, 0x997067a428b5bcf0, 0xfa11fe77117cdf02,
    0x80c98ebf2149567b, 0x0fa11fe77117cdf0, 0x75796f2f41224489,
    0x3a291b04893d698d, 0x40f16bccb908e0f4, 0xcf99fa94e9567b7f,
    0xb5418a5cd963f206, 0x513912c379682177, 0x2be1620b495da80e,
    0xa489f35319033385, 0xde51839b2936bafc, 0x9101f7b0e12997f8,
    0xebd98778d11c1e81, 0x64b116208142850a, 0x1e6966e8b1770c73,
    0x8719014c99c2b083, 0xfdc17184a9f739fa, 0x72a9e0dcf9a9a271,
    0x08719014c99c2b08, 0x4721e43f0183060c, 0x3df994f731b68f75,
    0xb29105af61e814fe, 0xc849756751dd9d87, 0x2c31edf8f1d64ef6,
    0x56e99d30c1e3c78f, 0xd9810c6891bd5c04, 0xa3597ca0a188d57d,
    0xec09088b6997f879, 0x96d1784359a27100, 0x19b9e91b09fcea8b,
    0x636199d339c963f2, 0xdf7adabd7a6e2d6f, 0xa5a2aa754a5ba416,
    0x2aca3b2d1a053f9d, 0x50124be52a30b6e4, 0x1f423fcee22f9be0,
    0x659a4f06d21a1299, 0xeaf2de5e82448912, 0x902aae96b271006b,
    0x74523609127ad31a, 0x0e8a46c1224f5a63, 0x81e2d7997211c1e8,
    0xfb3aa75142244891, 0xb46ad37a8a3b6595, 0xceb2a3b2ba0eecec,
    0x41da32eaea507767, 0x3b024222da65fe1e, 0xa2722586f2d042ee,
    0xd8aa554ec2e5cb97, 0x57c2c41692bb501c, 0x2d1ab4dea28ed965,
    0x624ac0f56a91f461, 0x1892b03d5aa47d18, 0x97fa21650afae693,
    0xed2251ad3acf6fea, 0x095ac9329ac4bc9b, 0x7382b9faaaf135e2,
    0xfcea28a2faafae69, 0x8632586aca9a2710, 0xc9622c4102850a14,
    0xb3ba5c8932b0836d, 0x3cd2cdd162ee18e6, 0x460abd1952db919f,
    0x256b24ca6b12f26d, 0x5fb354025b277b14, 0xd0dbc55a0b79e09f,
    0xaa03b5923b4c69e6, 0xe553c1b9f35344e2, 0x9f8bb171c366cd9b,
    0x10e3202993385610, 0x6a3b50e1a30ddf69, 0x8e43c87e03060c18,
    0xf49bb8b633338561, 0x7bf329ee636d1eea, 0x012b592653589793,
    0x4e7b2d0d9b47ba97, 0x34a35dc5ab7233ee, 0xbbcbcc9dfb2ca865,
    0xc113bc55cb19211c, 0x5863dbf1e3ac9dec, 0x22bbab39d3991495,
    0xadd33a6183c78f1e, 0xd70b4aa9b3f20667, 0x985b3e827bed2b63,
    0xe2834e4a4bd8a21a, 0x6debdf121b863991, 0x1733afda2bb3b0e8,
    0xf34b37458bb86399, 0x8993478dbb8deae0, 0x06fbd6d5ebd3716b,
    0x7c23a61ddbe6f812, 0x3373d23613f9d516, 0x49aba2fe23cc5c6f,
    0xc6c333a67392c7e4, 0xbc1b436e43a74e9d, 0x95ac9329ac4bc9b5,
    0xef74e3e19c7e40cc, 0x601c72b9cc20db47, 0x1ac40271fc15523e,
    0x5594765a340a7f3a, 0x2f4c0692043ff643, 0xa02497ca54616dc8,
    0xdafce7026454e4b1, 0x3e847f9dc45f37c0, 0x445c0f55f46abeb9,
    0xcb349e0da4342532, 0xb1eceec59401ac4b, 0xfebc9aee5c1e814f,
    0x8464ea266c2b0836, 0x0b0c7b7e3c7593bd, 0x71d40bb60c401ac4,
    0xe8a46c1224f5a634, 0x927c1cda14c02f4d, 0x1d148d82449eb4c6,
    0x67ccfd4a74ab3dbf, 0x289c8961bcb410bb, 0x5244f9a98c8199c2,
    0xdd2c68f1dcdf0249, 0xa7f41839ecea8b30, 0x438c80a64ce15841,
    0x3954f06e7cd4d138, 0xb63c61362c8a4ab3, 0xcce411fe1cbfc3ca,
    0x83b465d5d4a0eece, 0xf96c151de49567b7, 0x76048445b4cbfc3c,
    0x0cdcf48d84fe7545, 0x6fbd6d5ebd3716b7, 0x15651d968d029fce,
    0x9a0d8ccedd5c0445, 0xe0d5fc06ed698d3c, 0xaf85882d2576a038,
    0xd55df8e515432941, 0x5a3569bd451db2ca, 0x20ed197575283bb3,
    0xc49581ead523e8c2, 0xbe4df122e51661bb, 0x3125607ab548fa30,
    0x4bfd10b2857d7349, 0x04ad64994d625e4d, 0x7e7514517d57d734,
    0xf11d85092d094cbf, 0x8bc5f5c11d3cc5c6, 0x12b5926535897936,
    0x686de2ad05bcf04f, 0xe70573f555e26bc4, 0x9ddd033d65d7e2bd,
    0xd28d7716adc8cfb9, 0xa85507de9dfd46c0, 0x273d9686cda3dd4b,
    0x5de5e64efd965432, 0xb99d7ed15d9d8743, 0xc3450e196da80e3a,
    0x4c2d9f413df695b1, 0x36f5ef890dc31cc8, 0x79a59ba2c5dc31cc,
    0x037deb6af5e9b8b5, 0x8c157a32a5b7233e, 0xf6cd0afa9582aa47,
    0x4ad64994d625e4da, 0x300e395ce6106da3, 0xbf66a804b64ef628,
    0xc5bed8cc867b7f51, 0x8aeeace74e645255, 0xf036dc2f7e51db2c,
    0x7f5e4d772e0f40a7, 0x05863dbf1e3ac9de, 0xe1fea520be311aaf,
    0x9b26d5e88e0493d6, 0x144e44b0de5a085d, 0x6e963478ee6f8124,
    0x21c640532670ac20, 0x5b1e309b16452559, 0xd476a1c3461bbed2,
    0xaeaed10b762e37ab, 0x37deb6af5e9b8b5b, 0x4d06c6676eae0222,
    0xc26e573f3ef099a9, 0xb8b627f70ec510d0, 0xf7e653dcc6da3dd4,
    0x8d3e2314f6efb4ad, 0x0256b24ca6b12f26, 0x788ec2849684a65f,
    0x9cf65a1b368f752e, 0xe62e2ad306bafc57, 0x6946bb8b56e467dc,
    0x139ecb4366d1eea5, 0x5ccebf68aecec3a1, 0x2616cfa09efb4ad8,
    0xa97e5ef8cea5d153, 0xd3a62e30fe90582a, 0xb0c7b7e3c7593bd8,
    0xca1fc72bf76cb2a1, 0x45775673a732292a, 0x3faf26bb9707a053,
    0x70ff52905f188d57, 0x0a2722586f2d042e, 0x854fb3003f739fa5,
    0xff97c3c80f4616dc, 0x1bef5b57af4dc5ad, 0x61372b9f9f784cd4,
    0xee5fbac7cf26d75f, 0x9487ca0fff135e26, 0xdbd7be24370c7322,
    0xa10fceec0739fa5b, 0x2e675fb4576761d0, 0x54bf2f7c6752e8a9,
    0xcdcf48d84fe75459, 0xb71738107fd2dd20, 0x387fa9482f8c46ab,
    0x42a7d9801fb9cfd2, 0x0df7adabd7a6e2d6, 0x772fdd63e7936baf,
    0xf8474c3bb7cdf024, 0x829f3cf387f8795d, 0x66e7a46c27f3aa2c,
    0x1c3fd4a417c62355, 0x935745fc4798b8de, 0xe98f353477ad31a7,
    0xa6df411fbfb21ca3, 0xdc0731d78f8795da, 0x536fa08fdfd90e51,
    0x29b7d047efec8728,
};

// Folds `size` bytes of `data` into `accumulator`, one byte at a time,
// using crc64_table (the standard reflected table-driven CRC update:
// index by the low byte, shift the accumulator right by 8).
inline C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA uint64_t
crc64impl(uint64_t accumulator, const char* data, size_t size) {
  for (size_t i = 0; i < size; ++i) {
    accumulator =
        crc64_table[(accumulator ^ data[i]) & 0xFF] ^ (accumulator >> 8);
  }
  return accumulator;
}
|
| 109 |
+
} // namespace detail
|
| 110 |
+
|
| 111 |
+
// Strongly-typed wrapper for a CRC-64 checksum (distinct from a plain
// uint64_t at the type level; see c10/util/IdWrapper.h).
struct crc64_t final : IdWrapper<crc64_t, uint64_t> {
  constexpr crc64_t(uint64_t checksum) : IdWrapper(checksum) {}
  // The raw 64-bit checksum value.
  constexpr uint64_t checksum() const {
    return this->underlyingId();
  }
};
|
| 117 |
+
|
| 118 |
+
// CRC64 with Jones coefficients and an init value of 0.
// `size` is a byte count; usable in constexpr context except where
// C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA degrades to non-constexpr.
inline C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA crc64_t
crc64(const char* str, size_t size) {
  return crc64_t{detail::crc64impl(0, str, size)};
}
|
| 123 |
+
|
| 124 |
+
// Convenience overload: CRC64 (Jones, init 0) over a string_view's bytes.
inline C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA crc64_t crc64(c10::string_view str) {
  return crc64(str.data(), str.size());
}
|
| 127 |
+
} // namespace c10::util
|
| 128 |
+
|
| 129 |
+
// Allow usage of crc64_t in std::unordered_set
|
| 130 |
+
C10_DEFINE_HASH_FOR_IDWRAPPER(c10::util::crc64_t);
|
videochat2/lib/python3.10/site-packages/torch/include/c10/util/Exception.h
ADDED
|
@@ -0,0 +1,714 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#ifndef C10_UTIL_EXCEPTION_H_
|
| 2 |
+
#define C10_UTIL_EXCEPTION_H_
|
| 3 |
+
|
| 4 |
+
#include <c10/macros/Export.h>
|
| 5 |
+
#include <c10/macros/Macros.h>
|
| 6 |
+
#include <c10/util/Backtrace.h>
|
| 7 |
+
#include <c10/util/Lazy.h>
|
| 8 |
+
#include <c10/util/StringUtil.h>
|
| 9 |
+
|
| 10 |
+
#include <cstdint>
|
| 11 |
+
#include <exception>
|
| 12 |
+
#include <memory>
|
| 13 |
+
#include <string>
|
| 14 |
+
#include <variant>
|
| 15 |
+
#include <vector>
|
| 16 |
+
|
| 17 |
+
#if defined(_MSC_VER) && _MSC_VER <= 1900
|
| 18 |
+
#define __func__ __FUNCTION__
|
| 19 |
+
#endif
|
| 20 |
+
|
| 21 |
+
namespace c10 {
|
| 22 |
+
|
| 23 |
+
/// The primary ATen error class.
|
| 24 |
+
/// Provides a complete error message with source location information via
|
| 25 |
+
/// `what()`, and a more concise message via `what_without_backtrace()`.
|
| 26 |
+
/// Don't throw this directly; use TORCH_CHECK/TORCH_INTERNAL_ASSERT instead.
|
| 27 |
+
///
|
| 28 |
+
/// NB: c10::Error is handled specially by the default torch to suppress the
|
| 29 |
+
/// backtrace, see torch/csrc/Exceptions.h
|
| 30 |
+
class C10_API Error : public std::exception {
 private:
  // The actual error message.
  std::string msg_;

  // Context for the message (in order of decreasing specificity). Context will
  // be automatically formatted appropriately, so it is not necessary to add
  // extra leading/trailing newlines to strings inside this vector
  std::vector<std::string> context_;

  // The C++ backtrace at the point when this exception was raised. This
  // may be empty if there is no valid backtrace. (We don't use optional
  // here to reduce the dependencies this file has.)
  Backtrace backtrace_;

  // These two are derived fields from msg_stack_ and backtrace_, but we need
  // fields for the strings so that we can return a const char* (as the
  // signature of std::exception requires). Currently, the invariant
  // is that these fields are ALWAYS populated consistently with respect
  // to msg_stack_ and backtrace_.
  //
  // what_ is held via OptimisticLazy, so the fully-rendered message can
  // be (re)built on demand; hence `mutable` on a const what() path.
  mutable OptimisticLazy<std::string> what_;
  std::string what_without_backtrace_;

  // This is a little debugging trick: you can stash a relevant pointer
  // in caller, and then when you catch the exception, you can compare
  // against pointers you have on hand to get more information about
  // where the exception came from. In Caffe2, this is used to figure
  // out which operator raised an exception.
  const void* caller_;

 public:
  // PyTorch-style Error constructor. NB: the implementation of this
  // is actually in Logging.cpp
  Error(SourceLocation source_location, std::string msg);

  // Caffe2-style error message
  Error(
      const char* file,
      const uint32_t line,
      const char* condition,
      const std::string& msg,
      Backtrace backtrace,
      const void* caller = nullptr);

  // Base constructor
  Error(
      std::string msg,
      Backtrace backtrace = nullptr,
      const void* caller = nullptr);

  // Add some new context to the message stack. The last added context
  // will be formatted at the end of the context list upon printing.
  // WARNING: This method is O(n) in the size of the stack, so don't go
  // wild adding a ridiculous amount of context to error messages.
  void add_context(std::string msg);

  /// The raw error message, without context or backtrace.
  const std::string& msg() const {
    return msg_;
  }

  /// Context strings previously added via add_context().
  const std::vector<std::string>& context() const {
    return context_;
  }

  /// The backtrace captured when this error was raised (may be empty).
  const Backtrace& backtrace() const;

  /// Returns the complete error message, including the source location.
  /// The returned pointer is invalidated if you call add_context() on
  /// this object.
  const char* what() const noexcept override;

  /// The debugging pointer stashed at construction (see caller_ above).
  const void* caller() const noexcept {
    return caller_;
  }

  /// Returns only the error message string, without source location.
  /// The returned pointer is invalidated if you call add_context() on
  /// this object.
  virtual const char* what_without_backtrace() const noexcept {
    return what_without_backtrace_.c_str();
  }

 private:
  // Re-derives the cached what() strings after msg_/context_ change.
  void refresh_what();
  // Renders the full message; `include_backtrace` controls whether the
  // captured backtrace is appended.
  std::string compute_what(bool include_backtrace) const;
};
|
| 116 |
+
|
| 117 |
+
// A single warning event: its kind, where it was raised, and its text.
class C10_API Warning {
 public:
  // Empty tag types identifying the kind of warning being raised.
  class C10_API UserWarning {};
  class C10_API DeprecationWarning {};

  using warning_variant_t = std::variant<UserWarning, DeprecationWarning>;

  Warning(
      warning_variant_t type,
      const SourceLocation& source_location,
      std::string msg,
      bool verbatim);

  Warning(
      warning_variant_t type,
      SourceLocation source_location,
      const char* msg,
      bool verbatim);

  // Overload for the compile-time-empty-message case produced by the
  // warning macros (avoids constructing a std::string).
  Warning(
      warning_variant_t type,
      SourceLocation source_location,
      ::c10::detail::CompileTimeEmptyString msg,
      bool verbatim);

  // Getters for members
  warning_variant_t type() const;
  const SourceLocation& source_location() const;
  const std::string& msg() const;
  bool verbatim() const;

 private:
  // The type of warning
  warning_variant_t type_;

  // Where the warning happened.
  SourceLocation source_location_;

  // The actual warning message.
  std::string msg_;

  // See note: [Verbatim Warnings]
  bool verbatim_;
};
|
| 161 |
+
|
| 162 |
+
using UserWarning = Warning::UserWarning;
|
| 163 |
+
using DeprecationWarning = Warning::DeprecationWarning;
|
| 164 |
+
|
| 165 |
+
// Issue a warning with a given message. Dispatched to the current
|
| 166 |
+
// warning handler.
|
| 167 |
+
void C10_API warn(const Warning& warning);
|
| 168 |
+
|
| 169 |
+
class C10_API WarningHandler {
|
| 170 |
+
public:
|
| 171 |
+
virtual ~WarningHandler() = default;
|
| 172 |
+
/// The default warning handler. Prints the message to stderr.
|
| 173 |
+
virtual void process(const Warning& warning);
|
| 174 |
+
};
|
| 175 |
+
|
| 176 |
+
namespace WarningUtils {
|
| 177 |
+
|
| 178 |
+
// Note: [Verbatim Warnings]
|
| 179 |
+
// Warnings originating in C++ code can appear out-of-place to Python users:
|
| 180 |
+
// a user runs a line in Python, but the warning references a line in C++.
|
| 181 |
+
// Some parts of PyTorch, like the JIT, are cognizant of this mismatch
|
| 182 |
+
// and take care to map warnings back to the user's program, but most
|
| 183 |
+
// of PyTorch simply throws a context-free warning. To allow warning
|
| 184 |
+
// handlers to add context where appropriate, warn takes the
|
| 185 |
+
// "verbatim" flag. When this is false a warning handler might append
|
| 186 |
+
// the C++ warning to a Python warning message that relates the warning
|
| 187 |
+
// back to the user's program. Callers who have already accounted for
|
| 188 |
+
// context in their warnings should set verbatim to true so their warnings
|
| 189 |
+
// appear without modification.
|
| 190 |
+
|
| 191 |
+
/// Sets the global warning handler. This is not thread-safe, so it should
|
| 192 |
+
/// generally be called once during initialization or while holding the GIL
|
| 193 |
+
/// for programs that use python.
|
| 194 |
+
/// User is responsible for keeping the WarningHandler alive until
|
| 195 |
+
/// it is not needed.
|
| 196 |
+
C10_API void set_warning_handler(WarningHandler* handler) noexcept(true);
|
| 197 |
+
/// Gets the global warning handler.
|
| 198 |
+
C10_API WarningHandler* get_warning_handler() noexcept(true);
|
| 199 |
+
|
| 200 |
+
class C10_API WarningHandlerGuard {
|
| 201 |
+
WarningHandler* prev_handler_;
|
| 202 |
+
|
| 203 |
+
public:
|
| 204 |
+
WarningHandlerGuard(WarningHandler* new_handler)
|
| 205 |
+
: prev_handler_(c10::WarningUtils::get_warning_handler()) {
|
| 206 |
+
c10::WarningUtils::set_warning_handler(new_handler);
|
| 207 |
+
}
|
| 208 |
+
~WarningHandlerGuard() {
|
| 209 |
+
c10::WarningUtils::set_warning_handler(prev_handler_);
|
| 210 |
+
}
|
| 211 |
+
};
|
| 212 |
+
|
| 213 |
+
/// The TORCH_WARN_ONCE macro is difficult to test for. Use
|
| 214 |
+
/// setWarnAlways(true) to turn it into TORCH_WARN, which can be
|
| 215 |
+
/// tested for more easily.
|
| 216 |
+
C10_API void set_warnAlways(bool) noexcept(true);
|
| 217 |
+
C10_API bool get_warnAlways() noexcept(true);
|
| 218 |
+
|
| 219 |
+
// A RAII guard that sets warn_always (not thread-local) on
|
| 220 |
+
// construction, and sets it back to the original value upon destruction.
|
| 221 |
+
struct C10_API WarnAlways {
|
| 222 |
+
public:
|
| 223 |
+
explicit WarnAlways(bool setting = true);
|
| 224 |
+
~WarnAlways();
|
| 225 |
+
|
| 226 |
+
private:
|
| 227 |
+
bool prev_setting;
|
| 228 |
+
};
|
| 229 |
+
|
| 230 |
+
} // namespace WarningUtils
|
| 231 |
+
|
| 232 |
+
// Like Error, but we always report the C++ backtrace, instead of only
|
| 233 |
+
// reporting when TORCH_SHOW_CPP_STACKTRACES
|
| 234 |
+
class C10_API ErrorAlwaysShowCppStacktrace : public Error {
|
| 235 |
+
using Error::Error;
|
| 236 |
+
const char* what_without_backtrace() const noexcept override {
|
| 237 |
+
return what();
|
| 238 |
+
}
|
| 239 |
+
};
|
| 240 |
+
|
| 241 |
+
// Used in ATen for out-of-bound indices that can reasonably only be detected
|
| 242 |
+
// lazily inside a kernel (See: advanced indexing). These turn into
|
| 243 |
+
// IndexError when they cross to Python.
|
| 244 |
+
class C10_API IndexError : public Error {
|
| 245 |
+
using Error::Error;
|
| 246 |
+
};
|
| 247 |
+
|
| 248 |
+
// Used in ATen for invalid values. These turn into
|
| 249 |
+
// ValueError when they cross to Python.
|
| 250 |
+
class C10_API ValueError : public Error {
|
| 251 |
+
using Error::Error;
|
| 252 |
+
};
|
| 253 |
+
|
| 254 |
+
// Used in ATen for invalid types. These turn into
|
| 255 |
+
// TypeError when they cross to Python.
|
| 256 |
+
class C10_API TypeError : public Error {
|
| 257 |
+
using Error::Error;
|
| 258 |
+
};
|
| 259 |
+
|
| 260 |
+
// Used in ATen for functionality that is not implemented. These turn into
|
| 261 |
+
// NotImplementedError when they cross to Python.
|
| 262 |
+
class C10_API NotImplementedError : public Error {
|
| 263 |
+
using Error::Error;
|
| 264 |
+
};
|
| 265 |
+
|
| 266 |
+
// Used in ATen for non finite indices. These turn into
|
| 267 |
+
// ExitException when they cross to Python.
|
| 268 |
+
class C10_API EnforceFiniteError : public Error {
|
| 269 |
+
using Error::Error;
|
| 270 |
+
};
|
| 271 |
+
|
| 272 |
+
// Used in Onnxifi backend lowering. These turn into
|
| 273 |
+
// ExitException when they cross to Python.
|
| 274 |
+
class C10_API OnnxfiBackendSystemError : public Error {
|
| 275 |
+
using Error::Error;
|
| 276 |
+
};
|
| 277 |
+
|
| 278 |
+
// Used for numerical errors from the linalg module. These
|
| 279 |
+
// turn into LinAlgError when they cross into Python.
|
| 280 |
+
class C10_API LinAlgError : public Error {
|
| 281 |
+
using Error::Error;
|
| 282 |
+
};
|
| 283 |
+
|
| 284 |
+
class C10_API OutOfMemoryError : public Error {
|
| 285 |
+
using Error::Error;
|
| 286 |
+
};
|
| 287 |
+
|
| 288 |
+
// Base error type for all distributed errors.
|
| 289 |
+
// These turn into DistError when they cross into Python.
|
| 290 |
+
class C10_API DistError : public Error {
|
| 291 |
+
using Error::Error;
|
| 292 |
+
};
|
| 293 |
+
|
| 294 |
+
// Used for collective communication library errors from the distributed module.
|
| 295 |
+
// These turn into DistBackendError when they cross into Python.
|
| 296 |
+
class C10_API DistBackendError : public DistError {
|
| 297 |
+
using DistError::DistError;
|
| 298 |
+
};
|
| 299 |
+
|
| 300 |
+
// Used for errors originating from the store.
|
| 301 |
+
// These turn into DistStoreError when they cross into Python.
|
| 302 |
+
class C10_API DistStoreError : public DistError {
|
| 303 |
+
using DistError::DistError;
|
| 304 |
+
};
|
| 305 |
+
|
| 306 |
+
// Used for errors originating from the TCP/IP stack and not from collective
|
| 307 |
+
// libraries. These turn into DistNetworkError when they cross into Python.
|
| 308 |
+
class C10_API DistNetworkError : public DistError {
|
| 309 |
+
using DistError::DistError;
|
| 310 |
+
};
|
| 311 |
+
|
| 312 |
+
// A utility function to return an exception std::string by prepending its
|
| 313 |
+
// exception type before its what() content
|
| 314 |
+
C10_API std::string GetExceptionString(const std::exception& e);
|
| 315 |
+
|
| 316 |
+
} // namespace c10
|
| 317 |
+
|
| 318 |
+
// Private helper macro for implementing TORCH_INTERNAL_ASSERT and TORCH_CHECK
|
| 319 |
+
//
|
| 320 |
+
// Note: In the debug build With MSVC, __LINE__ might be of long type (a.k.a
|
| 321 |
+
// int32_t), which is different from the definition of `SourceLocation` that
|
| 322 |
+
// requires unsigned int (a.k.a uint32_t) and may cause a compile error with the
|
| 323 |
+
// message: error C2397: conversion from 'long' to 'uint32_t' requires a
|
| 324 |
+
// narrowing conversion Here the static cast is used to pass the build. if this
|
| 325 |
+
// is used inside a lambda the __func__ macro expands to operator(), which isn't
|
| 326 |
+
// very useful, but hard to fix in a macro so suppressing the warning.
|
| 327 |
+
#define C10_THROW_ERROR(err_type, msg) \
|
| 328 |
+
throw ::c10::err_type( \
|
| 329 |
+
{__func__, __FILE__, static_cast<uint32_t>(__LINE__)}, msg)
|
| 330 |
+
|
| 331 |
+
#define C10_BUILD_ERROR(err_type, msg) \
|
| 332 |
+
::c10::err_type({__func__, __FILE__, static_cast<uint32_t>(__LINE__)}, msg)
|
| 333 |
+
|
| 334 |
+
// Private helper macro for workaround MSVC misexpansion of nested macro
|
| 335 |
+
// invocations involving __VA_ARGS__. See
|
| 336 |
+
// https://stackoverflow.com/questions/5134523/msvc-doesnt-expand-va-args-correctly
|
| 337 |
+
#define C10_EXPAND_MSVC_WORKAROUND(x) x
|
| 338 |
+
|
| 339 |
+
// On nvcc, C10_UNLIKELY thwarts missing return statement analysis. In cases
|
| 340 |
+
// where the unlikely expression may be a constant, use this macro to ensure
|
| 341 |
+
// return statement analysis keeps working (at the cost of not getting the
|
| 342 |
+
// likely/unlikely annotation on nvcc).
|
| 343 |
+
// https://github.com/pytorch/pytorch/issues/21418
|
| 344 |
+
//
|
| 345 |
+
// Currently, this is only used in the error reporting macros below. If you
|
| 346 |
+
// want to use it more generally, move me to Macros.h
|
| 347 |
+
//
|
| 348 |
+
// TODO: Brian Vaughan observed that we might be able to get this to work on
|
| 349 |
+
// nvcc by writing some sort of C++ overload that distinguishes constexpr inputs
|
| 350 |
+
// from non-constexpr. Since there isn't any evidence that losing C10_UNLIKELY
|
| 351 |
+
// in nvcc is causing us perf problems, this is not yet implemented, but this
|
| 352 |
+
// might be an interesting piece of C++ code for an intrepid bootcamper to
|
| 353 |
+
// write.
|
| 354 |
+
#if defined(__CUDACC__)
|
| 355 |
+
#define C10_UNLIKELY_OR_CONST(e) e
|
| 356 |
+
#else
|
| 357 |
+
#define C10_UNLIKELY_OR_CONST(e) C10_UNLIKELY(e)
|
| 358 |
+
#endif
|
| 359 |
+
|
| 360 |
+
// ----------------------------------------------------------------------------
|
| 361 |
+
// Error reporting macros
|
| 362 |
+
// ----------------------------------------------------------------------------
|
| 363 |
+
|
| 364 |
+
#ifdef STRIP_ERROR_MESSAGES
|
| 365 |
+
#define TORCH_RETHROW(e, ...) throw
|
| 366 |
+
#else
|
| 367 |
+
#define TORCH_RETHROW(e, ...) \
|
| 368 |
+
do { \
|
| 369 |
+
e.add_context(::c10::str(__VA_ARGS__)); \
|
| 370 |
+
throw; \
|
| 371 |
+
} while (false)
|
| 372 |
+
#endif
|
| 373 |
+
|
| 374 |
+
// A utility macro to provide assert()-like functionality; that is, enforcement
|
| 375 |
+
// of internal invariants in code. It supports an arbitrary number of extra
|
| 376 |
+
// arguments (evaluated only on failure), which will be printed in the assert
|
| 377 |
+
// failure message using operator<< (this is useful to print some variables
|
| 378 |
+
// which may be useful for debugging.)
|
| 379 |
+
//
|
| 380 |
+
// Usage:
|
| 381 |
+
// TORCH_INTERNAL_ASSERT(should_be_true);
|
| 382 |
+
// TORCH_INTERNAL_ASSERT(x == 0, "x = ", x);
|
| 383 |
+
//
|
| 384 |
+
// Assuming no bugs in PyTorch, the conditions tested by this macro should
|
| 385 |
+
// always be true; e.g., it should be possible to disable all of these
|
| 386 |
+
// conditions without changing observable user behavior. If you would like to
|
| 387 |
+
// do error reporting for user input, please use TORCH_CHECK instead.
|
| 388 |
+
//
|
| 389 |
+
// NOTE: It is SAFE to use this macro in production code; on failure, this
|
| 390 |
+
// simply raises an exception, it does NOT unceremoniously quit the process
|
| 391 |
+
// (unlike assert()).
|
| 392 |
+
//
|
| 393 |
+
#ifdef STRIP_ERROR_MESSAGES
|
| 394 |
+
#define TORCH_INTERNAL_ASSERT(cond, ...) \
|
| 395 |
+
if (C10_UNLIKELY_OR_CONST(!(cond))) { \
|
| 396 |
+
::c10::detail::torchCheckFail( \
|
| 397 |
+
__func__, \
|
| 398 |
+
__FILE__, \
|
| 399 |
+
static_cast<uint32_t>(__LINE__), \
|
| 400 |
+
#cond " INTERNAL ASSERT FAILED at " C10_STRINGIZE(__FILE__)); \
|
| 401 |
+
}
|
| 402 |
+
#else
|
| 403 |
+
// It would be nice if we could build a combined string literal out of
|
| 404 |
+
// the TORCH_INTERNAL_ASSERT prefix and a user-provided string literal
|
| 405 |
+
// as the first argument, but there doesn't seem to be any good way to
|
| 406 |
+
// do that while still supporting having a first argument that isn't a
|
| 407 |
+
// string literal.
|
| 408 |
+
#define TORCH_INTERNAL_ASSERT(cond, ...) \
|
| 409 |
+
if (C10_UNLIKELY_OR_CONST(!(cond))) { \
|
| 410 |
+
::c10::detail::torchInternalAssertFail( \
|
| 411 |
+
__func__, \
|
| 412 |
+
__FILE__, \
|
| 413 |
+
static_cast<uint32_t>(__LINE__), \
|
| 414 |
+
#cond \
|
| 415 |
+
" INTERNAL ASSERT FAILED at " C10_STRINGIZE(__FILE__) ":" C10_STRINGIZE( \
|
| 416 |
+
__LINE__) ", please report a bug to PyTorch. ", \
|
| 417 |
+
c10::str(__VA_ARGS__)); \
|
| 418 |
+
}
|
| 419 |
+
#endif
|
| 420 |
+
|
| 421 |
+
// A utility macro to make it easier to test for error conditions from user
|
| 422 |
+
// input. Like TORCH_INTERNAL_ASSERT, it supports an arbitrary number of extra
|
| 423 |
+
// arguments (evaluated only on failure), which will be printed in the error
|
| 424 |
+
// message using operator<< (e.g., you can pass any object which has
|
| 425 |
+
// operator<< defined. Most objects in PyTorch have these definitions!)
|
| 426 |
+
//
|
| 427 |
+
// Usage:
|
| 428 |
+
// TORCH_CHECK(should_be_true); // A default error message will be provided
|
| 429 |
+
// // in this case; but we recommend writing an
|
| 430 |
+
// // explicit error message, as it is more
|
| 431 |
+
// // user friendly.
|
| 432 |
+
// TORCH_CHECK(x == 0, "Expected x to be 0, but got ", x);
|
| 433 |
+
//
|
| 434 |
+
// On failure, this macro will raise an exception. If this exception propagates
|
| 435 |
+
// to Python, it will convert into a Python RuntimeError.
|
| 436 |
+
//
|
| 437 |
+
// NOTE: It is SAFE to use this macro in production code; on failure, this
|
| 438 |
+
// simply raises an exception, it does NOT unceremoniously quit the process
|
| 439 |
+
// (unlike CHECK() from glog.)
|
| 440 |
+
//
|
| 441 |
+
#define TORCH_CHECK_WITH(error_t, cond, ...) \
|
| 442 |
+
TORCH_CHECK_WITH_MSG(error_t, cond, "", __VA_ARGS__)
|
| 443 |
+
|
| 444 |
+
#ifdef STRIP_ERROR_MESSAGES
|
| 445 |
+
#define TORCH_CHECK_MSG(cond, type, ...) \
|
| 446 |
+
(#cond #type " CHECK FAILED at " C10_STRINGIZE(__FILE__))
|
| 447 |
+
#define TORCH_CHECK_WITH_MSG(error_t, cond, type, ...) \
|
| 448 |
+
if (C10_UNLIKELY_OR_CONST(!(cond))) { \
|
| 449 |
+
C10_THROW_ERROR(Error, TORCH_CHECK_MSG(cond, type, __VA_ARGS__)); \
|
| 450 |
+
}
|
| 451 |
+
#else
|
| 452 |
+
|
| 453 |
+
namespace c10::detail {
|
| 454 |
+
template <typename... Args>
|
| 455 |
+
decltype(auto) torchCheckMsgImpl(const char* /*msg*/, const Args&... args) {
|
| 456 |
+
return ::c10::str(args...);
|
| 457 |
+
}
|
| 458 |
+
inline C10_API const char* torchCheckMsgImpl(const char* msg) {
|
| 459 |
+
return msg;
|
| 460 |
+
}
|
| 461 |
+
// If there is just 1 user-provided C-string argument, use it.
|
| 462 |
+
inline C10_API const char* torchCheckMsgImpl(
|
| 463 |
+
const char* /*msg*/,
|
| 464 |
+
const char* args) {
|
| 465 |
+
return args;
|
| 466 |
+
}
|
| 467 |
+
} // namespace c10::detail
|
| 468 |
+
|
| 469 |
+
#define TORCH_CHECK_MSG(cond, type, ...) \
|
| 470 |
+
(::c10::detail::torchCheckMsgImpl( \
|
| 471 |
+
"Expected " #cond \
|
| 472 |
+
" to be true, but got false. " \
|
| 473 |
+
"(Could this error message be improved? If so, " \
|
| 474 |
+
"please report an enhancement request to PyTorch.)", \
|
| 475 |
+
##__VA_ARGS__))
|
| 476 |
+
#define TORCH_CHECK_WITH_MSG(error_t, cond, type, ...) \
|
| 477 |
+
if (C10_UNLIKELY_OR_CONST(!(cond))) { \
|
| 478 |
+
C10_THROW_ERROR(error_t, TORCH_CHECK_MSG(cond, type, __VA_ARGS__)); \
|
| 479 |
+
}
|
| 480 |
+
#endif
|
| 481 |
+
|
| 482 |
+
namespace c10::detail {
|
| 483 |
+
|
| 484 |
+
[[noreturn]] C10_API void torchCheckFail(
|
| 485 |
+
const char* func,
|
| 486 |
+
const char* file,
|
| 487 |
+
uint32_t line,
|
| 488 |
+
const std::string& msg);
|
| 489 |
+
[[noreturn]] C10_API void torchCheckFail(
|
| 490 |
+
const char* func,
|
| 491 |
+
const char* file,
|
| 492 |
+
uint32_t line,
|
| 493 |
+
const char* msg);
|
| 494 |
+
|
| 495 |
+
// The c10::str() call that creates userMsg can have 1 of 3 return
|
| 496 |
+
// types depending on the number and types of arguments passed to
|
| 497 |
+
// TORCH_INTERNAL_ASSERT. 0 arguments will get a
|
| 498 |
+
// CompileTimeEmptyString, 1 const char * will be passed straight
|
| 499 |
+
// through, and anything else will get converted to std::string.
|
| 500 |
+
[[noreturn]] C10_API void torchInternalAssertFail(
|
| 501 |
+
const char* func,
|
| 502 |
+
const char* file,
|
| 503 |
+
uint32_t line,
|
| 504 |
+
const char* condMsg,
|
| 505 |
+
const char* userMsg);
|
| 506 |
+
[[noreturn]] inline C10_API void torchInternalAssertFail(
|
| 507 |
+
const char* func,
|
| 508 |
+
const char* file,
|
| 509 |
+
uint32_t line,
|
| 510 |
+
const char* condMsg,
|
| 511 |
+
::c10::detail::CompileTimeEmptyString /*userMsg*/) {
|
| 512 |
+
torchCheckFail(func, file, line, condMsg);
|
| 513 |
+
}
|
| 514 |
+
[[noreturn]] C10_API void torchInternalAssertFail(
|
| 515 |
+
const char* func,
|
| 516 |
+
const char* file,
|
| 517 |
+
uint32_t line,
|
| 518 |
+
const char* condMsg,
|
| 519 |
+
const std::string& userMsg);
|
| 520 |
+
|
| 521 |
+
} // namespace c10::detail
|
| 522 |
+
|
| 523 |
+
#ifdef STRIP_ERROR_MESSAGES
|
| 524 |
+
#define TORCH_CHECK(cond, ...) \
|
| 525 |
+
if (C10_UNLIKELY_OR_CONST(!(cond))) { \
|
| 526 |
+
::c10::detail::torchCheckFail( \
|
| 527 |
+
__func__, \
|
| 528 |
+
__FILE__, \
|
| 529 |
+
static_cast<uint32_t>(__LINE__), \
|
| 530 |
+
TORCH_CHECK_MSG(cond, "", __VA_ARGS__)); \
|
| 531 |
+
}
|
| 532 |
+
#else
|
| 533 |
+
#define TORCH_CHECK(cond, ...) \
|
| 534 |
+
if (C10_UNLIKELY_OR_CONST(!(cond))) { \
|
| 535 |
+
::c10::detail::torchCheckFail( \
|
| 536 |
+
__func__, \
|
| 537 |
+
__FILE__, \
|
| 538 |
+
static_cast<uint32_t>(__LINE__), \
|
| 539 |
+
TORCH_CHECK_MSG(cond, "", ##__VA_ARGS__)); \
|
| 540 |
+
}
|
| 541 |
+
#endif
|
| 542 |
+
|
| 543 |
+
// An utility macro that does what `TORCH_CHECK` does if compiled in the host
|
| 544 |
+
// code, otherwise does nothing. Supposed to be used in the code shared between
|
| 545 |
+
// host and device code as an alternative for `TORCH_CHECK`.
|
| 546 |
+
#if defined(__CUDACC__) || defined(__HIPCC__)
|
| 547 |
+
#define TORCH_CHECK_IF_NOT_ON_CUDA(cond, ...)
|
| 548 |
+
#else
|
| 549 |
+
#define TORCH_CHECK_IF_NOT_ON_CUDA(cond, ...) TORCH_CHECK(cond, ##__VA_ARGS__)
|
| 550 |
+
#endif
|
| 551 |
+
|
| 552 |
+
// Debug only version of TORCH_INTERNAL_ASSERT. This macro only checks in debug
|
| 553 |
+
// build, and does nothing in release build. It is appropriate to use
|
| 554 |
+
// in situations where you want to add an assert to a hotpath, but it is
|
| 555 |
+
// too expensive to run this assert on production builds.
|
| 556 |
+
#ifdef NDEBUG
|
| 557 |
+
// Optimized version - generates no code.
|
| 558 |
+
#define TORCH_INTERNAL_ASSERT_DEBUG_ONLY(...) \
|
| 559 |
+
while (false) \
|
| 560 |
+
C10_EXPAND_MSVC_WORKAROUND(TORCH_INTERNAL_ASSERT(__VA_ARGS__))
|
| 561 |
+
#else
|
| 562 |
+
#define TORCH_INTERNAL_ASSERT_DEBUG_ONLY(...) \
|
| 563 |
+
C10_EXPAND_MSVC_WORKAROUND(TORCH_INTERNAL_ASSERT(__VA_ARGS__))
|
| 564 |
+
#endif
|
| 565 |
+
|
| 566 |
+
// TODO: We're going to get a lot of similar looking string literals
|
| 567 |
+
// this way; check if this actually affects binary size.
|
| 568 |
+
|
| 569 |
+
// Like TORCH_CHECK, but raises LinAlgError instead of Error.
|
| 570 |
+
#define TORCH_CHECK_LINALG(cond, ...) \
|
| 571 |
+
TORCH_CHECK_WITH_MSG(LinAlgError, cond, "LINALG", __VA_ARGS__)
|
| 572 |
+
|
| 573 |
+
// Like TORCH_CHECK, but raises IndexErrors instead of Errors.
|
| 574 |
+
#define TORCH_CHECK_INDEX(cond, ...) \
|
| 575 |
+
TORCH_CHECK_WITH_MSG(IndexError, cond, "INDEX", __VA_ARGS__)
|
| 576 |
+
|
| 577 |
+
// Like TORCH_CHECK, but raises ValueErrors instead of Errors.
|
| 578 |
+
#define TORCH_CHECK_VALUE(cond, ...) \
|
| 579 |
+
TORCH_CHECK_WITH_MSG(ValueError, cond, "VALUE", __VA_ARGS__)
|
| 580 |
+
|
| 581 |
+
// Like TORCH_CHECK, but raises TypeErrors instead of Errors.
|
| 582 |
+
#define TORCH_CHECK_TYPE(cond, ...) \
|
| 583 |
+
TORCH_CHECK_WITH_MSG(TypeError, cond, "TYPE", __VA_ARGS__)
|
| 584 |
+
|
| 585 |
+
// Like TORCH_CHECK, but raises NotImplementedErrors instead of Errors.
|
| 586 |
+
#define TORCH_CHECK_NOT_IMPLEMENTED(cond, ...) \
|
| 587 |
+
TORCH_CHECK_WITH_MSG(NotImplementedError, cond, "TYPE", __VA_ARGS__)
|
| 588 |
+
|
| 589 |
+
#define TORCH_CHECK_ALWAYS_SHOW_CPP_STACKTRACE(cond, ...) \
|
| 590 |
+
TORCH_CHECK_WITH_MSG( \
|
| 591 |
+
ErrorAlwaysShowCppStacktrace, cond, "TYPE", ##__VA_ARGS__)
|
| 592 |
+
|
| 593 |
+
#ifdef STRIP_ERROR_MESSAGES
|
| 594 |
+
#define WARNING_MESSAGE_STRING(...) \
|
| 595 |
+
::c10::detail::CompileTimeEmptyString {}
|
| 596 |
+
#else
|
| 597 |
+
#define WARNING_MESSAGE_STRING(...) ::c10::str(__VA_ARGS__)
|
| 598 |
+
#endif
|
| 599 |
+
|
| 600 |
+
// Report a warning to the user. Accepts an arbitrary number of extra
|
| 601 |
+
// arguments which are concatenated into the warning message using operator<<
|
| 602 |
+
//
|
| 603 |
+
#ifdef DISABLE_WARN
|
| 604 |
+
#define _TORCH_WARN_WITH(...) ((void)0);
|
| 605 |
+
#else
|
| 606 |
+
#define _TORCH_WARN_WITH(warning_t, ...) \
|
| 607 |
+
::c10::warn(::c10::Warning( \
|
| 608 |
+
warning_t(), \
|
| 609 |
+
{__func__, __FILE__, static_cast<uint32_t>(__LINE__)}, \
|
| 610 |
+
WARNING_MESSAGE_STRING(__VA_ARGS__), \
|
| 611 |
+
false));
|
| 612 |
+
#endif
|
| 613 |
+
|
| 614 |
+
#define TORCH_WARN(...) _TORCH_WARN_WITH(::c10::UserWarning, __VA_ARGS__);
|
| 615 |
+
|
| 616 |
+
#define TORCH_WARN_DEPRECATION(...) \
|
| 617 |
+
_TORCH_WARN_WITH(::c10::DeprecationWarning, __VA_ARGS__);
|
| 618 |
+
|
| 619 |
+
// Report a warning to the user only once. Accepts an arbitrary number of extra
|
| 620 |
+
// arguments which are concatenated into the warning message using operator<<
|
| 621 |
+
//
|
| 622 |
+
#define _TORCH_WARN_ONCE(...) \
|
| 623 |
+
C10_UNUSED static const auto C10_ANONYMOUS_VARIABLE(torch_warn_once_) = \
|
| 624 |
+
[&] { \
|
| 625 |
+
TORCH_WARN(__VA_ARGS__); \
|
| 626 |
+
return true; \
|
| 627 |
+
}()
|
| 628 |
+
|
| 629 |
+
#ifdef DISABLE_WARN
|
| 630 |
+
#define TORCH_WARN_ONCE(...) ((void)0);
|
| 631 |
+
#else
|
| 632 |
+
#define TORCH_WARN_ONCE(...) \
|
| 633 |
+
if (::c10::WarningUtils::get_warnAlways()) { \
|
| 634 |
+
TORCH_WARN(__VA_ARGS__); \
|
| 635 |
+
} else { \
|
| 636 |
+
_TORCH_WARN_ONCE(__VA_ARGS__); \
|
| 637 |
+
}
|
| 638 |
+
#endif
|
| 639 |
+
|
| 640 |
+
// Report an error with a specific argument
|
| 641 |
+
// NOTE: using the argument name in TORCH_CHECK's message is preferred
|
| 642 |
+
#define TORCH_CHECK_ARG(cond, argN, ...) \
|
| 643 |
+
TORCH_CHECK(cond, "invalid argument ", argN, ": ", __VA_ARGS__)
|
| 644 |
+
|
| 645 |
+
// ----------------------------------------------------------------------------
|
| 646 |
+
// Deprecated macros
|
| 647 |
+
// ----------------------------------------------------------------------------
|
| 648 |
+
|
| 649 |
+
namespace c10::detail {
|
| 650 |
+
|
| 651 |
+
/*
|
| 652 |
+
// Deprecation disabled until we fix sites in our codebase
|
| 653 |
+
C10_DEPRECATED_MESSAGE("AT_ERROR(msg) is deprecated, use TORCH_CHECK(false, msg)
|
| 654 |
+
instead.")
|
| 655 |
+
*/
|
| 656 |
+
inline void deprecated_AT_ERROR() {}
|
| 657 |
+
|
| 658 |
+
/*
|
| 659 |
+
// Deprecation disabled until we fix sites in our codebase
|
| 660 |
+
C10_DEPRECATED_MESSAGE("AT_ASSERT is deprecated, if you mean to indicate an
|
| 661 |
+
internal invariant failure, use " \
|
| 662 |
+
"TORCH_INTERNAL_ASSERT instead; if you mean to do user
|
| 663 |
+
error checking, use " \ "TORCH_CHECK. See
|
| 664 |
+
https://github.com/pytorch/pytorch/issues/20287 for more details.")
|
| 665 |
+
*/
|
| 666 |
+
inline void deprecated_AT_ASSERT() {}
|
| 667 |
+
|
| 668 |
+
/*
|
| 669 |
+
// Deprecation disabled until we fix sites in our codebase
|
| 670 |
+
C10_DEPRECATED_MESSAGE("AT_ASSERTM is deprecated, if you mean to indicate an
|
| 671 |
+
internal invariant failure, use " \
|
| 672 |
+
"TORCH_INTERNAL_ASSERT instead; if you mean to do user
|
| 673 |
+
error checking, use " \ "TORCH_CHECK. See
|
| 674 |
+
https://github.com/pytorch/pytorch/issues/20287 for more details.")
|
| 675 |
+
*/
|
| 676 |
+
inline void deprecated_AT_ASSERTM() {}
|
| 677 |
+
|
| 678 |
+
} // namespace c10::detail
|
| 679 |
+
|
| 680 |
+
// Deprecated alias; this alias was deprecated because people kept mistakenly
|
| 681 |
+
// using it for user error checking. Use TORCH_INTERNAL_ASSERT or TORCH_CHECK
|
| 682 |
+
// instead. See https://github.com/pytorch/pytorch/issues/20287 for more
|
| 683 |
+
// details.
|
| 684 |
+
#define AT_ASSERT(...) \
|
| 685 |
+
do { \
|
| 686 |
+
::c10::detail::deprecated_AT_ASSERT(); \
|
| 687 |
+
C10_EXPAND_MSVC_WORKAROUND(TORCH_INTERNAL_ASSERT(__VA_ARGS__)); \
|
| 688 |
+
} while (false)
|
| 689 |
+
|
| 690 |
+
// Deprecated alias, like AT_ASSERT. The new TORCH_INTERNAL_ASSERT macro
|
| 691 |
+
// supports both 0-ary and variadic calls, so having a separate
|
| 692 |
+
// message-accepting macro is not necessary.
|
| 693 |
+
//
|
| 694 |
+
// NB: we MUST include cond explicitly here, as MSVC will miscompile the macro
|
| 695 |
+
// expansion, shunting all of __VA_ARGS__ to cond. An alternate workaround
|
| 696 |
+
// can be seen at
|
| 697 |
+
// https://stackoverflow.com/questions/5134523/msvc-doesnt-expand-va-args-correctly
|
| 698 |
+
#define AT_ASSERTM(cond, ...) \
|
| 699 |
+
do { \
|
| 700 |
+
::c10::detail::deprecated_AT_ASSERTM(); \
|
| 701 |
+
C10_EXPAND_MSVC_WORKAROUND(TORCH_INTERNAL_ASSERT(cond, __VA_ARGS__)); \
|
| 702 |
+
} while (false)
|
| 703 |
+
|
| 704 |
+
// Deprecated alias; this alias was deprecated because it represents extra API
|
| 705 |
+
// surface that makes it hard for people to understand what macro to use.
|
| 706 |
+
// Use TORCH_CHECK(false, ...) or TORCH_INTERNAL_ASSERT(false, ...) to
|
| 707 |
+
// unconditionally fail at a line of code.
|
| 708 |
+
#define AT_ERROR(...) \
|
| 709 |
+
do { \
|
| 710 |
+
::c10::detail::deprecated_AT_ERROR(); \
|
| 711 |
+
C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
|
| 712 |
+
} while (false)
|
| 713 |
+
|
| 714 |
+
#endif // C10_UTIL_EXCEPTION_H_
|
videochat2/lib/python3.10/site-packages/torch/include/c10/util/ExclusivelyOwned.h
ADDED
|
@@ -0,0 +1,140 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <utility>
|
| 4 |
+
|
| 5 |
+
namespace c10 {
|
| 6 |
+
|
| 7 |
+
// See example implementation in TensorBase.h and TensorBody.h.
|
| 8 |
+
// Synopsis:
|
| 9 |
+
//
|
| 10 |
+
// repr_type -- type to use to store an owned T in ExclusivelyOwned.
|
| 11 |
+
//
|
| 12 |
+
// pointer_type -- pointer-esque type to return from
|
| 13 |
+
// ExclusivelyOwned's get() and operator*() methods.
|
| 14 |
+
//
|
| 15 |
+
// const_pointer_type -- similar to pointer_type, used for the const methods.
|
| 16 |
+
//
|
| 17 |
+
// static repr_type nullRepr() -- return a null instance of repr_type.
|
| 18 |
+
//
|
| 19 |
+
// template <class... Args>
|
| 20 |
+
// static repr_type createInPlace(Args&&... args) -- used by the in-place
|
| 21 |
+
// ExclusivelyOwned constructor.
|
| 22 |
+
//
|
| 23 |
+
// static repr_type moveToRepr(T&& x) -- move the given x into an
|
| 24 |
+
// instance of repr_type. used by the ExclusivelyOwned(T&&)
|
| 25 |
+
// constructor.
|
| 26 |
+
//
|
| 27 |
+
// static void destroyOwned(repr_type x) -- free memory for a
|
| 28 |
+
// known-exclusively-owned instance of x. Replaces calling repr_type's
|
| 29 |
+
// destructor. Being able to implement this more efficiently than
|
| 30 |
+
// repr_type's destructor is the main reason to use ExclusivelyOwned
|
| 31 |
+
// for a type.
|
| 32 |
+
//
|
| 33 |
+
// static T take(repr_type&) -- move out of the given repr_type into an owned T.
|
| 34 |
+
//
|
| 35 |
+
// static pointer_type getImpl(const repr_type&) -- return a pointer
|
| 36 |
+
// to the given repr_type. May take repr_type by value if that is more
|
| 37 |
+
// efficient.
|
| 38 |
+
template <typename T>
|
| 39 |
+
struct ExclusivelyOwnedTraits;
|
| 40 |
+
|
| 41 |
+
/// ExclusivelyOwned is a smart-pointer-like wrapper around an
|
| 42 |
+
/// exclusively-owned instance of some type T that normally has
|
| 43 |
+
/// mandatory reference counting (currently just Tensor). If you have
|
| 44 |
+
/// an isolated piece of code that knows that it has sole ownership of
|
| 45 |
+
/// an object of one of these types (i.e., because you created it
|
| 46 |
+
/// directly or using a factory function) and that object will not
|
| 47 |
+
/// escape from that isolated piece of code, then moving the object
|
| 48 |
+
/// into an ExclusivelyOwned will avoid an atomic reference count
|
| 49 |
+
/// decrement at destruction time.
|
| 50 |
+
///
|
| 51 |
+
/// If you directly create the Tensor in the first
|
| 52 |
+
/// place, you can use the in_place constructor of ExclusivelyOwned to
|
| 53 |
+
/// additionally avoid doing any stores to initialize the refcount &
|
| 54 |
+
/// weakcount.
|
| 55 |
+
template <typename T>
|
| 56 |
+
class ExclusivelyOwned {
|
| 57 |
+
using EOT = ExclusivelyOwnedTraits<T>;
|
| 58 |
+
typename ExclusivelyOwnedTraits<T>::repr_type repr_;
|
| 59 |
+
|
| 60 |
+
public:
|
| 61 |
+
ExclusivelyOwned() : repr_(EOT::nullRepr()) {}
|
| 62 |
+
|
| 63 |
+
explicit ExclusivelyOwned(T&& t) : repr_(EOT::moveToRepr(std::move(t))) {}
|
| 64 |
+
|
| 65 |
+
template <class... Args>
|
| 66 |
+
explicit ExclusivelyOwned(std::in_place_t, Args&&... args)
|
| 67 |
+
: repr_(EOT::createInPlace(std::forward<Args>(args)...)) {}
|
| 68 |
+
|
| 69 |
+
ExclusivelyOwned(const ExclusivelyOwned&) = delete;
|
| 70 |
+
|
| 71 |
+
ExclusivelyOwned(ExclusivelyOwned&& rhs) noexcept
|
| 72 |
+
: repr_(std::move(rhs.repr_)) {
|
| 73 |
+
rhs.repr_ = EOT::nullRepr();
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
ExclusivelyOwned& operator=(const ExclusivelyOwned&) = delete;
|
| 77 |
+
|
| 78 |
+
ExclusivelyOwned& operator=(ExclusivelyOwned&& rhs) noexcept {
|
| 79 |
+
EOT::destroyOwned(repr_);
|
| 80 |
+
repr_ = std::move(rhs.repr_);
|
| 81 |
+
rhs.repr_ = EOT::nullRepr();
|
| 82 |
+
return *this;
|
| 83 |
+
}
|
| 84 |
+
|
| 85 |
+
ExclusivelyOwned& operator=(T&& rhs) noexcept {
|
| 86 |
+
EOT::destroyOwned(repr_);
|
| 87 |
+
repr_ = EOT::moveToRepr(std::move(rhs));
|
| 88 |
+
return *this;
|
| 89 |
+
}
|
| 90 |
+
|
| 91 |
+
~ExclusivelyOwned() {
|
| 92 |
+
EOT::destroyOwned(repr_);
|
| 93 |
+
// Don't bother to call the destructor of repr_, since we already
|
| 94 |
+
// did specialized destruction for the exclusively-owned case in
|
| 95 |
+
// destroyOwned!
|
| 96 |
+
}
|
| 97 |
+
|
| 98 |
+
// We don't provide this because it would require us to be able to
|
| 99 |
+
// differentiate an owned-but-empty T from a lack of T. This is
|
| 100 |
+
// particularly problematic for Tensor, which wants to use an
|
| 101 |
+
// undefined Tensor as its null state.
|
| 102 |
+
explicit operator bool() const noexcept = delete;
|
| 103 |
+
|
| 104 |
+
operator T() && {
|
| 105 |
+
return take();
|
| 106 |
+
}
|
| 107 |
+
|
| 108 |
+
// NOTE: the equivalent operation on MaybeOwned is a moving
|
| 109 |
+
// operator*. For ExclusivelyOwned, take() and operator*() may well
|
| 110 |
+
// have different return types, so they are different functions.
|
| 111 |
+
T take() && {
|
| 112 |
+
return EOT::take(repr_);
|
| 113 |
+
}
|
| 114 |
+
|
| 115 |
+
typename EOT::const_pointer_type operator->() const {
|
| 116 |
+
return get();
|
| 117 |
+
}
|
| 118 |
+
|
| 119 |
+
typename EOT::const_pointer_type get() const {
|
| 120 |
+
return EOT::getImpl(repr_);
|
| 121 |
+
}
|
| 122 |
+
|
| 123 |
+
typename EOT::pointer_type operator->() {
|
| 124 |
+
return get();
|
| 125 |
+
}
|
| 126 |
+
|
| 127 |
+
typename EOT::pointer_type get() {
|
| 128 |
+
return EOT::getImpl(repr_);
|
| 129 |
+
}
|
| 130 |
+
|
| 131 |
+
std::remove_pointer_t<typename EOT::const_pointer_type>& operator*() const {
|
| 132 |
+
return *get();
|
| 133 |
+
}
|
| 134 |
+
|
| 135 |
+
std::remove_pointer_t<typename EOT::pointer_type>& operator*() {
|
| 136 |
+
return *get();
|
| 137 |
+
}
|
| 138 |
+
};
|
| 139 |
+
|
| 140 |
+
} // namespace c10
|
videochat2/lib/python3.10/site-packages/torch/include/c10/util/ExclusivelyOwnedTensorTraits.h
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/core/TensorImpl.h>
|
| 4 |
+
#include <c10/core/UndefinedTensorImpl.h>
|
| 5 |
+
|
| 6 |
+
#include <utility>
|
| 7 |
+
|
| 8 |
+
namespace c10 {
|
| 9 |
+
// Shared ExclusivelyOwnedTraits implementation between caffe2::Tensor and
|
| 10 |
+
// at::TensorBase.
|
| 11 |
+
template <typename TensorType>
|
| 12 |
+
struct ExclusivelyOwnedTensorTraits {
|
| 13 |
+
using repr_type = TensorType;
|
| 14 |
+
using pointer_type = TensorType*;
|
| 15 |
+
using const_pointer_type = const TensorType*;
|
| 16 |
+
|
| 17 |
+
static repr_type nullRepr() {
|
| 18 |
+
return TensorType();
|
| 19 |
+
}
|
| 20 |
+
|
| 21 |
+
template <class... Args>
|
| 22 |
+
static repr_type createInPlace(Args&&... args) {
|
| 23 |
+
return TensorType(std::forward<Args>(args)...);
|
| 24 |
+
}
|
| 25 |
+
|
| 26 |
+
static repr_type moveToRepr(TensorType&& x) {
|
| 27 |
+
return std::move(x);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
static void destroyOwned(TensorType& x) {
|
| 31 |
+
TensorImpl* const toDestroy = x.unsafeReleaseTensorImpl();
|
| 32 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
| 33 |
+
toDestroy != nullptr, "Tensor somehow got null TensorImpl?");
|
| 34 |
+
// May be 0 because UndefinedTensorImpl doesn't get its refcount
|
| 35 |
+
// incremented.
|
| 36 |
+
const bool isUndefined = toDestroy == UndefinedTensorImpl::singleton();
|
| 37 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
| 38 |
+
toDestroy->refcount_ == 1 || (toDestroy->refcount_ == 0 && isUndefined),
|
| 39 |
+
"ExclusivelyOwned<Tensor> destroyed with isUndefined ",
|
| 40 |
+
isUndefined,
|
| 41 |
+
" and refcount ",
|
| 42 |
+
toDestroy->refcount_,
|
| 43 |
+
", expected 1 or, if isUndefined, 0!");
|
| 44 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
| 45 |
+
toDestroy->weakcount_ == 1 ||
|
| 46 |
+
(toDestroy->weakcount_ == 0 &&
|
| 47 |
+
toDestroy == UndefinedTensorImpl::singleton()),
|
| 48 |
+
"ExclusivelyOwned<Tensor> destroyed with isUndefined ",
|
| 49 |
+
isUndefined,
|
| 50 |
+
" and weakcount ",
|
| 51 |
+
toDestroy->weakcount_,
|
| 52 |
+
", expected 1 or, if isUndefined, 0!");
|
| 53 |
+
if (!isUndefined) {
|
| 54 |
+
#ifndef NDEBUG
|
| 55 |
+
// Needed to pass the debug assertions in ~intrusive_ptr_target.
|
| 56 |
+
toDestroy->refcount_ = 0;
|
| 57 |
+
toDestroy->weakcount_ = 0;
|
| 58 |
+
#endif
|
| 59 |
+
delete toDestroy;
|
| 60 |
+
}
|
| 61 |
+
}
|
| 62 |
+
|
| 63 |
+
static TensorType take(TensorType& x) {
|
| 64 |
+
return std::move(x);
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
+
static pointer_type getImpl(repr_type& x) {
|
| 68 |
+
return &x;
|
| 69 |
+
}
|
| 70 |
+
|
| 71 |
+
static const_pointer_type getImpl(const repr_type& x) {
|
| 72 |
+
return &x;
|
| 73 |
+
}
|
| 74 |
+
};
|
| 75 |
+
} // namespace c10
|
videochat2/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fn-inl.h
ADDED
|
@@ -0,0 +1,274 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/macros/Macros.h>
|
| 4 |
+
#include <cstdint>
|
| 5 |
+
#include <limits>
|
| 6 |
+
|
| 7 |
+
C10_CLANG_DIAGNOSTIC_PUSH()
|
| 8 |
+
#if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion")
|
| 9 |
+
C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion")
|
| 10 |
+
#endif
|
| 11 |
+
|
| 12 |
+
namespace c10 {
|
| 13 |
+
|
| 14 |
+
/// Constructors
|
| 15 |
+
|
| 16 |
+
inline C10_HOST_DEVICE Float8_e4m3fn::Float8_e4m3fn(float value)
|
| 17 |
+
: x(detail::fp8e4m3fn_from_fp32_value(value)) {}
|
| 18 |
+
|
| 19 |
+
/// Implicit conversions
|
| 20 |
+
|
| 21 |
+
inline C10_HOST_DEVICE Float8_e4m3fn::operator float() const {
|
| 22 |
+
return detail::fp8e4m3fn_to_fp32_value(x);
|
| 23 |
+
}
|
| 24 |
+
|
| 25 |
+
/// Special values helper
|
| 26 |
+
|
| 27 |
+
inline C10_HOST_DEVICE bool Float8_e4m3fn::isnan() const {
|
| 28 |
+
return (x & 0b01111111) == 0b01111111;
|
| 29 |
+
}
|
| 30 |
+
|
| 31 |
+
/// Arithmetic
|
| 32 |
+
|
| 33 |
+
inline C10_HOST_DEVICE Float8_e4m3fn
|
| 34 |
+
operator+(const Float8_e4m3fn& a, const Float8_e4m3fn& b) {
|
| 35 |
+
return static_cast<float>(a) + static_cast<float>(b);
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
inline C10_HOST_DEVICE Float8_e4m3fn
|
| 39 |
+
operator-(const Float8_e4m3fn& a, const Float8_e4m3fn& b) {
|
| 40 |
+
return static_cast<float>(a) - static_cast<float>(b);
|
| 41 |
+
}
|
| 42 |
+
|
| 43 |
+
inline C10_HOST_DEVICE Float8_e4m3fn
|
| 44 |
+
operator*(const Float8_e4m3fn& a, const Float8_e4m3fn& b) {
|
| 45 |
+
return static_cast<float>(a) * static_cast<float>(b);
|
| 46 |
+
}
|
| 47 |
+
|
| 48 |
+
inline C10_HOST_DEVICE Float8_e4m3fn operator/(
|
| 49 |
+
const Float8_e4m3fn& a,
|
| 50 |
+
const Float8_e4m3fn& b) __ubsan_ignore_float_divide_by_zero__ {
|
| 51 |
+
return static_cast<float>(a) / static_cast<float>(b);
|
| 52 |
+
}
|
| 53 |
+
|
| 54 |
+
inline C10_HOST_DEVICE Float8_e4m3fn operator-(const Float8_e4m3fn& a) {
|
| 55 |
+
return -static_cast<float>(a);
|
| 56 |
+
}
|
| 57 |
+
|
| 58 |
+
inline C10_HOST_DEVICE Float8_e4m3fn& operator+=(
|
| 59 |
+
Float8_e4m3fn& a,
|
| 60 |
+
const Float8_e4m3fn& b) {
|
| 61 |
+
a = a + b;
|
| 62 |
+
return a;
|
| 63 |
+
}
|
| 64 |
+
|
| 65 |
+
inline C10_HOST_DEVICE Float8_e4m3fn& operator-=(
|
| 66 |
+
Float8_e4m3fn& a,
|
| 67 |
+
const Float8_e4m3fn& b) {
|
| 68 |
+
a = a - b;
|
| 69 |
+
return a;
|
| 70 |
+
}
|
| 71 |
+
|
| 72 |
+
inline C10_HOST_DEVICE Float8_e4m3fn& operator*=(
|
| 73 |
+
Float8_e4m3fn& a,
|
| 74 |
+
const Float8_e4m3fn& b) {
|
| 75 |
+
a = a * b;
|
| 76 |
+
return a;
|
| 77 |
+
}
|
| 78 |
+
|
| 79 |
+
inline C10_HOST_DEVICE Float8_e4m3fn& operator/=(
|
| 80 |
+
Float8_e4m3fn& a,
|
| 81 |
+
const Float8_e4m3fn& b) {
|
| 82 |
+
a = a / b;
|
| 83 |
+
return a;
|
| 84 |
+
}
|
| 85 |
+
|
| 86 |
+
/// Arithmetic with floats
|
| 87 |
+
|
| 88 |
+
inline C10_HOST_DEVICE float operator+(Float8_e4m3fn a, float b) {
|
| 89 |
+
return static_cast<float>(a) + b;
|
| 90 |
+
}
|
| 91 |
+
inline C10_HOST_DEVICE float operator-(Float8_e4m3fn a, float b) {
|
| 92 |
+
return static_cast<float>(a) - b;
|
| 93 |
+
}
|
| 94 |
+
inline C10_HOST_DEVICE float operator*(Float8_e4m3fn a, float b) {
|
| 95 |
+
return static_cast<float>(a) * b;
|
| 96 |
+
}
|
| 97 |
+
inline C10_HOST_DEVICE float operator/(Float8_e4m3fn a, float b)
|
| 98 |
+
__ubsan_ignore_float_divide_by_zero__ {
|
| 99 |
+
return static_cast<float>(a) / b;
|
| 100 |
+
}
|
| 101 |
+
|
| 102 |
+
inline C10_HOST_DEVICE float operator+(float a, Float8_e4m3fn b) {
|
| 103 |
+
return a + static_cast<float>(b);
|
| 104 |
+
}
|
| 105 |
+
inline C10_HOST_DEVICE float operator-(float a, Float8_e4m3fn b) {
|
| 106 |
+
return a - static_cast<float>(b);
|
| 107 |
+
}
|
| 108 |
+
inline C10_HOST_DEVICE float operator*(float a, Float8_e4m3fn b) {
|
| 109 |
+
return a * static_cast<float>(b);
|
| 110 |
+
}
|
| 111 |
+
inline C10_HOST_DEVICE float operator/(float a, Float8_e4m3fn b)
|
| 112 |
+
__ubsan_ignore_float_divide_by_zero__ {
|
| 113 |
+
return a / static_cast<float>(b);
|
| 114 |
+
}
|
| 115 |
+
|
| 116 |
+
inline C10_HOST_DEVICE float& operator+=(float& a, const Float8_e4m3fn& b) {
|
| 117 |
+
return a += static_cast<float>(b);
|
| 118 |
+
}
|
| 119 |
+
inline C10_HOST_DEVICE float& operator-=(float& a, const Float8_e4m3fn& b) {
|
| 120 |
+
return a -= static_cast<float>(b);
|
| 121 |
+
}
|
| 122 |
+
inline C10_HOST_DEVICE float& operator*=(float& a, const Float8_e4m3fn& b) {
|
| 123 |
+
return a *= static_cast<float>(b);
|
| 124 |
+
}
|
| 125 |
+
inline C10_HOST_DEVICE float& operator/=(float& a, const Float8_e4m3fn& b) {
|
| 126 |
+
return a /= static_cast<float>(b);
|
| 127 |
+
}
|
| 128 |
+
|
| 129 |
+
/// Arithmetic with doubles
|
| 130 |
+
|
| 131 |
+
inline C10_HOST_DEVICE double operator+(Float8_e4m3fn a, double b) {
|
| 132 |
+
return static_cast<double>(a) + b;
|
| 133 |
+
}
|
| 134 |
+
inline C10_HOST_DEVICE double operator-(Float8_e4m3fn a, double b) {
|
| 135 |
+
return static_cast<double>(a) - b;
|
| 136 |
+
}
|
| 137 |
+
inline C10_HOST_DEVICE double operator*(Float8_e4m3fn a, double b) {
|
| 138 |
+
return static_cast<double>(a) * b;
|
| 139 |
+
}
|
| 140 |
+
inline C10_HOST_DEVICE double operator/(Float8_e4m3fn a, double b)
|
| 141 |
+
__ubsan_ignore_float_divide_by_zero__ {
|
| 142 |
+
return static_cast<double>(a) / b;
|
| 143 |
+
}
|
| 144 |
+
|
| 145 |
+
inline C10_HOST_DEVICE double operator+(double a, Float8_e4m3fn b) {
|
| 146 |
+
return a + static_cast<double>(b);
|
| 147 |
+
}
|
| 148 |
+
inline C10_HOST_DEVICE double operator-(double a, Float8_e4m3fn b) {
|
| 149 |
+
return a - static_cast<double>(b);
|
| 150 |
+
}
|
| 151 |
+
inline C10_HOST_DEVICE double operator*(double a, Float8_e4m3fn b) {
|
| 152 |
+
return a * static_cast<double>(b);
|
| 153 |
+
}
|
| 154 |
+
inline C10_HOST_DEVICE double operator/(double a, Float8_e4m3fn b)
|
| 155 |
+
__ubsan_ignore_float_divide_by_zero__ {
|
| 156 |
+
return a / static_cast<double>(b);
|
| 157 |
+
}
|
| 158 |
+
|
| 159 |
+
/// Arithmetic with ints
|
| 160 |
+
|
| 161 |
+
inline C10_HOST_DEVICE Float8_e4m3fn operator+(Float8_e4m3fn a, int b) {
|
| 162 |
+
return a + static_cast<Float8_e4m3fn>(b);
|
| 163 |
+
}
|
| 164 |
+
inline C10_HOST_DEVICE Float8_e4m3fn operator-(Float8_e4m3fn a, int b) {
|
| 165 |
+
return a - static_cast<Float8_e4m3fn>(b);
|
| 166 |
+
}
|
| 167 |
+
inline C10_HOST_DEVICE Float8_e4m3fn operator*(Float8_e4m3fn a, int b) {
|
| 168 |
+
return a * static_cast<Float8_e4m3fn>(b);
|
| 169 |
+
}
|
| 170 |
+
inline C10_HOST_DEVICE Float8_e4m3fn operator/(Float8_e4m3fn a, int b) {
|
| 171 |
+
return a / static_cast<Float8_e4m3fn>(b);
|
| 172 |
+
}
|
| 173 |
+
|
| 174 |
+
inline C10_HOST_DEVICE Float8_e4m3fn operator+(int a, Float8_e4m3fn b) {
|
| 175 |
+
return static_cast<Float8_e4m3fn>(a) + b;
|
| 176 |
+
}
|
| 177 |
+
inline C10_HOST_DEVICE Float8_e4m3fn operator-(int a, Float8_e4m3fn b) {
|
| 178 |
+
return static_cast<Float8_e4m3fn>(a) - b;
|
| 179 |
+
}
|
| 180 |
+
inline C10_HOST_DEVICE Float8_e4m3fn operator*(int a, Float8_e4m3fn b) {
|
| 181 |
+
return static_cast<Float8_e4m3fn>(a) * b;
|
| 182 |
+
}
|
| 183 |
+
inline C10_HOST_DEVICE Float8_e4m3fn operator/(int a, Float8_e4m3fn b) {
|
| 184 |
+
return static_cast<Float8_e4m3fn>(a) / b;
|
| 185 |
+
}
|
| 186 |
+
|
| 187 |
+
//// Arithmetic with int64_t
|
| 188 |
+
|
| 189 |
+
inline C10_HOST_DEVICE Float8_e4m3fn operator+(Float8_e4m3fn a, int64_t b) {
|
| 190 |
+
return a + static_cast<Float8_e4m3fn>(b);
|
| 191 |
+
}
|
| 192 |
+
inline C10_HOST_DEVICE Float8_e4m3fn operator-(Float8_e4m3fn a, int64_t b) {
|
| 193 |
+
return a - static_cast<Float8_e4m3fn>(b);
|
| 194 |
+
}
|
| 195 |
+
inline C10_HOST_DEVICE Float8_e4m3fn operator*(Float8_e4m3fn a, int64_t b) {
|
| 196 |
+
return a * static_cast<Float8_e4m3fn>(b);
|
| 197 |
+
}
|
| 198 |
+
inline C10_HOST_DEVICE Float8_e4m3fn operator/(Float8_e4m3fn a, int64_t b) {
|
| 199 |
+
return a / static_cast<Float8_e4m3fn>(b);
|
| 200 |
+
}
|
| 201 |
+
|
| 202 |
+
inline C10_HOST_DEVICE Float8_e4m3fn operator+(int64_t a, Float8_e4m3fn b) {
|
| 203 |
+
return static_cast<Float8_e4m3fn>(a) + b;
|
| 204 |
+
}
|
| 205 |
+
inline C10_HOST_DEVICE Float8_e4m3fn operator-(int64_t a, Float8_e4m3fn b) {
|
| 206 |
+
return static_cast<Float8_e4m3fn>(a) - b;
|
| 207 |
+
}
|
| 208 |
+
inline C10_HOST_DEVICE Float8_e4m3fn operator*(int64_t a, Float8_e4m3fn b) {
|
| 209 |
+
return static_cast<Float8_e4m3fn>(a) * b;
|
| 210 |
+
}
|
| 211 |
+
inline C10_HOST_DEVICE Float8_e4m3fn operator/(int64_t a, Float8_e4m3fn b) {
|
| 212 |
+
return static_cast<Float8_e4m3fn>(a) / b;
|
| 213 |
+
}
|
| 214 |
+
|
| 215 |
+
/// NOTE: we do not define comparisons directly and instead rely on the implicit
|
| 216 |
+
/// conversion from c10::Float8_e4m3fn to float.
|
| 217 |
+
|
| 218 |
+
} // namespace c10
|
| 219 |
+
|
| 220 |
+
namespace std {
|
| 221 |
+
|
| 222 |
+
template <>
|
| 223 |
+
class numeric_limits<c10::Float8_e4m3fn> {
|
| 224 |
+
public:
|
| 225 |
+
static constexpr bool is_specialized = true;
|
| 226 |
+
static constexpr bool is_signed = true;
|
| 227 |
+
static constexpr bool is_integer = false;
|
| 228 |
+
static constexpr bool is_exact = false;
|
| 229 |
+
static constexpr bool has_infinity = false;
|
| 230 |
+
static constexpr bool has_quiet_NaN = true;
|
| 231 |
+
static constexpr bool has_signaling_NaN = false;
|
| 232 |
+
static constexpr auto has_denorm = true;
|
| 233 |
+
static constexpr auto has_denorm_loss = true;
|
| 234 |
+
static constexpr auto round_style = numeric_limits<float>::round_style;
|
| 235 |
+
static constexpr bool is_iec559 = false;
|
| 236 |
+
static constexpr bool is_bounded = true;
|
| 237 |
+
static constexpr bool is_modulo = false;
|
| 238 |
+
static constexpr int digits = 4;
|
| 239 |
+
static constexpr int digits10 = 0;
|
| 240 |
+
static constexpr int max_digits10 = 3;
|
| 241 |
+
static constexpr int radix = 2;
|
| 242 |
+
static constexpr int min_exponent = -5;
|
| 243 |
+
static constexpr int min_exponent10 = -1;
|
| 244 |
+
static constexpr int max_exponent = 8;
|
| 245 |
+
static constexpr int max_exponent10 = 2;
|
| 246 |
+
static constexpr auto traps = numeric_limits<float>::traps;
|
| 247 |
+
static constexpr auto tinyness_before = false;
|
| 248 |
+
|
| 249 |
+
static constexpr c10::Float8_e4m3fn min() {
|
| 250 |
+
return c10::Float8_e4m3fn(0x08, c10::Float8_e4m3fn::from_bits());
|
| 251 |
+
}
|
| 252 |
+
static constexpr c10::Float8_e4m3fn lowest() {
|
| 253 |
+
return c10::Float8_e4m3fn(0xFE, c10::Float8_e4m3fn::from_bits());
|
| 254 |
+
}
|
| 255 |
+
static constexpr c10::Float8_e4m3fn max() {
|
| 256 |
+
return c10::Float8_e4m3fn(0x7E, c10::Float8_e4m3fn::from_bits());
|
| 257 |
+
}
|
| 258 |
+
static constexpr c10::Float8_e4m3fn epsilon() {
|
| 259 |
+
return c10::Float8_e4m3fn(0x20, c10::Float8_e4m3fn::from_bits());
|
| 260 |
+
}
|
| 261 |
+
static constexpr c10::Float8_e4m3fn round_error() {
|
| 262 |
+
return c10::Float8_e4m3fn(0x30, c10::Float8_e4m3fn::from_bits());
|
| 263 |
+
}
|
| 264 |
+
static constexpr c10::Float8_e4m3fn quiet_NaN() {
|
| 265 |
+
return c10::Float8_e4m3fn(0x7F, c10::Float8_e4m3fn::from_bits());
|
| 266 |
+
}
|
| 267 |
+
static constexpr c10::Float8_e4m3fn denorm_min() {
|
| 268 |
+
return c10::Float8_e4m3fn(0x01, c10::Float8_e4m3fn::from_bits());
|
| 269 |
+
}
|
| 270 |
+
};
|
| 271 |
+
|
| 272 |
+
} // namespace std
|
| 273 |
+
|
| 274 |
+
C10_CLANG_DIAGNOSTIC_POP()
|
videochat2/lib/python3.10/site-packages/torch/include/c10/util/Float8_e4m3fnuz-inl.h
ADDED
|
@@ -0,0 +1,279 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/macros/Macros.h>
|
| 4 |
+
#include <c10/util/Float8_fnuz_cvt.h>
|
| 5 |
+
#include <cstring>
|
| 6 |
+
#include <limits>
|
| 7 |
+
|
| 8 |
+
C10_CLANG_DIAGNOSTIC_PUSH()
|
| 9 |
+
#if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion")
|
| 10 |
+
C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion")
|
| 11 |
+
#endif
|
| 12 |
+
|
| 13 |
+
namespace c10 {
|
| 14 |
+
|
| 15 |
+
/// Constructors
|
| 16 |
+
|
| 17 |
+
inline C10_HOST_DEVICE Float8_e4m3fnuz::Float8_e4m3fnuz(float value)
|
| 18 |
+
: x(detail::fp8e4m3fnuz_from_fp32_value(value)) {}
|
| 19 |
+
|
| 20 |
+
/// Implicit conversions
|
| 21 |
+
|
| 22 |
+
inline C10_HOST_DEVICE Float8_e4m3fnuz::operator float() const {
|
| 23 |
+
return detail::fp8_fnuz_to_fp32_value<4, 3>(x);
|
| 24 |
+
}
|
| 25 |
+
|
| 26 |
+
/// Special values helper
|
| 27 |
+
|
| 28 |
+
inline C10_HOST_DEVICE bool Float8_e4m3fnuz::isnan() const {
|
| 29 |
+
return x == 0b10000000;
|
| 30 |
+
}
|
| 31 |
+
|
| 32 |
+
/// Arithmetic
|
| 33 |
+
|
| 34 |
+
inline C10_HOST_DEVICE Float8_e4m3fnuz
|
| 35 |
+
operator+(const Float8_e4m3fnuz& a, const Float8_e4m3fnuz& b) {
|
| 36 |
+
return static_cast<float>(a) + static_cast<float>(b);
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
inline C10_HOST_DEVICE Float8_e4m3fnuz
|
| 40 |
+
operator-(const Float8_e4m3fnuz& a, const Float8_e4m3fnuz& b) {
|
| 41 |
+
return static_cast<float>(a) - static_cast<float>(b);
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
inline C10_HOST_DEVICE Float8_e4m3fnuz
|
| 45 |
+
operator*(const Float8_e4m3fnuz& a, const Float8_e4m3fnuz& b) {
|
| 46 |
+
return static_cast<float>(a) * static_cast<float>(b);
|
| 47 |
+
}
|
| 48 |
+
|
| 49 |
+
inline C10_HOST_DEVICE Float8_e4m3fnuz operator/(
|
| 50 |
+
const Float8_e4m3fnuz& a,
|
| 51 |
+
const Float8_e4m3fnuz& b) __ubsan_ignore_float_divide_by_zero__ {
|
| 52 |
+
return static_cast<float>(a) / static_cast<float>(b);
|
| 53 |
+
}
|
| 54 |
+
|
| 55 |
+
inline C10_HOST_DEVICE Float8_e4m3fnuz operator-(const Float8_e4m3fnuz& a) {
|
| 56 |
+
return -static_cast<float>(a);
|
| 57 |
+
}
|
| 58 |
+
|
| 59 |
+
inline C10_HOST_DEVICE Float8_e4m3fnuz& operator+=(
|
| 60 |
+
Float8_e4m3fnuz& a,
|
| 61 |
+
const Float8_e4m3fnuz& b) {
|
| 62 |
+
a = a + b;
|
| 63 |
+
return a;
|
| 64 |
+
}
|
| 65 |
+
|
| 66 |
+
inline C10_HOST_DEVICE Float8_e4m3fnuz& operator-=(
|
| 67 |
+
Float8_e4m3fnuz& a,
|
| 68 |
+
const Float8_e4m3fnuz& b) {
|
| 69 |
+
a = a - b;
|
| 70 |
+
return a;
|
| 71 |
+
}
|
| 72 |
+
|
| 73 |
+
inline C10_HOST_DEVICE Float8_e4m3fnuz& operator*=(
|
| 74 |
+
Float8_e4m3fnuz& a,
|
| 75 |
+
const Float8_e4m3fnuz& b) {
|
| 76 |
+
a = a * b;
|
| 77 |
+
return a;
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
inline C10_HOST_DEVICE Float8_e4m3fnuz& operator/=(
|
| 81 |
+
Float8_e4m3fnuz& a,
|
| 82 |
+
const Float8_e4m3fnuz& b) {
|
| 83 |
+
a = a / b;
|
| 84 |
+
return a;
|
| 85 |
+
}
|
| 86 |
+
|
| 87 |
+
/// Arithmetic with floats
|
| 88 |
+
|
| 89 |
+
inline C10_HOST_DEVICE float operator+(Float8_e4m3fnuz a, float b) {
|
| 90 |
+
return static_cast<float>(a) + b;
|
| 91 |
+
}
|
| 92 |
+
inline C10_HOST_DEVICE float operator-(Float8_e4m3fnuz a, float b) {
|
| 93 |
+
return static_cast<float>(a) - b;
|
| 94 |
+
}
|
| 95 |
+
inline C10_HOST_DEVICE float operator*(Float8_e4m3fnuz a, float b) {
|
| 96 |
+
return static_cast<float>(a) * b;
|
| 97 |
+
}
|
| 98 |
+
inline C10_HOST_DEVICE float operator/(Float8_e4m3fnuz a, float b)
|
| 99 |
+
__ubsan_ignore_float_divide_by_zero__ {
|
| 100 |
+
return static_cast<float>(a) / b;
|
| 101 |
+
}
|
| 102 |
+
|
| 103 |
+
inline C10_HOST_DEVICE float operator+(float a, Float8_e4m3fnuz b) {
|
| 104 |
+
return a + static_cast<float>(b);
|
| 105 |
+
}
|
| 106 |
+
inline C10_HOST_DEVICE float operator-(float a, Float8_e4m3fnuz b) {
|
| 107 |
+
return a - static_cast<float>(b);
|
| 108 |
+
}
|
| 109 |
+
inline C10_HOST_DEVICE float operator*(float a, Float8_e4m3fnuz b) {
|
| 110 |
+
return a * static_cast<float>(b);
|
| 111 |
+
}
|
| 112 |
+
inline C10_HOST_DEVICE float operator/(float a, Float8_e4m3fnuz b)
|
| 113 |
+
__ubsan_ignore_float_divide_by_zero__ {
|
| 114 |
+
return a / static_cast<float>(b);
|
| 115 |
+
}
|
| 116 |
+
|
| 117 |
+
inline C10_HOST_DEVICE float& operator+=(float& a, const Float8_e4m3fnuz& b) {
|
| 118 |
+
return a += static_cast<float>(b);
|
| 119 |
+
}
|
| 120 |
+
inline C10_HOST_DEVICE float& operator-=(float& a, const Float8_e4m3fnuz& b) {
|
| 121 |
+
return a -= static_cast<float>(b);
|
| 122 |
+
}
|
| 123 |
+
inline C10_HOST_DEVICE float& operator*=(float& a, const Float8_e4m3fnuz& b) {
|
| 124 |
+
return a *= static_cast<float>(b);
|
| 125 |
+
}
|
| 126 |
+
inline C10_HOST_DEVICE float& operator/=(float& a, const Float8_e4m3fnuz& b) {
|
| 127 |
+
return a /= static_cast<float>(b);
|
| 128 |
+
}
|
| 129 |
+
|
| 130 |
+
/// Arithmetic with doubles
|
| 131 |
+
|
| 132 |
+
inline C10_HOST_DEVICE double operator+(Float8_e4m3fnuz a, double b) {
|
| 133 |
+
return static_cast<double>(a) + b;
|
| 134 |
+
}
|
| 135 |
+
inline C10_HOST_DEVICE double operator-(Float8_e4m3fnuz a, double b) {
|
| 136 |
+
return static_cast<double>(a) - b;
|
| 137 |
+
}
|
| 138 |
+
inline C10_HOST_DEVICE double operator*(Float8_e4m3fnuz a, double b) {
|
| 139 |
+
return static_cast<double>(a) * b;
|
| 140 |
+
}
|
| 141 |
+
inline C10_HOST_DEVICE double operator/(Float8_e4m3fnuz a, double b)
|
| 142 |
+
__ubsan_ignore_float_divide_by_zero__ {
|
| 143 |
+
return static_cast<double>(a) / b;
|
| 144 |
+
}
|
| 145 |
+
|
| 146 |
+
inline C10_HOST_DEVICE double operator+(double a, Float8_e4m3fnuz b) {
|
| 147 |
+
return a + static_cast<double>(b);
|
| 148 |
+
}
|
| 149 |
+
inline C10_HOST_DEVICE double operator-(double a, Float8_e4m3fnuz b) {
|
| 150 |
+
return a - static_cast<double>(b);
|
| 151 |
+
}
|
| 152 |
+
inline C10_HOST_DEVICE double operator*(double a, Float8_e4m3fnuz b) {
|
| 153 |
+
return a * static_cast<double>(b);
|
| 154 |
+
}
|
| 155 |
+
inline C10_HOST_DEVICE double operator/(double a, Float8_e4m3fnuz b)
|
| 156 |
+
__ubsan_ignore_float_divide_by_zero__ {
|
| 157 |
+
return a / static_cast<double>(b);
|
| 158 |
+
}
|
| 159 |
+
|
| 160 |
+
/// Arithmetic with ints
|
| 161 |
+
|
| 162 |
+
inline C10_HOST_DEVICE Float8_e4m3fnuz operator+(Float8_e4m3fnuz a, int b) {
|
| 163 |
+
return a + static_cast<Float8_e4m3fnuz>(b);
|
| 164 |
+
}
|
| 165 |
+
inline C10_HOST_DEVICE Float8_e4m3fnuz operator-(Float8_e4m3fnuz a, int b) {
|
| 166 |
+
return a - static_cast<Float8_e4m3fnuz>(b);
|
| 167 |
+
}
|
| 168 |
+
inline C10_HOST_DEVICE Float8_e4m3fnuz operator*(Float8_e4m3fnuz a, int b) {
|
| 169 |
+
return a * static_cast<Float8_e4m3fnuz>(b);
|
| 170 |
+
}
|
| 171 |
+
inline C10_HOST_DEVICE Float8_e4m3fnuz operator/(Float8_e4m3fnuz a, int b) {
|
| 172 |
+
return a / static_cast<Float8_e4m3fnuz>(b);
|
| 173 |
+
}
|
| 174 |
+
|
| 175 |
+
inline C10_HOST_DEVICE Float8_e4m3fnuz operator+(int a, Float8_e4m3fnuz b) {
|
| 176 |
+
return static_cast<Float8_e4m3fnuz>(a) + b;
|
| 177 |
+
}
|
| 178 |
+
inline C10_HOST_DEVICE Float8_e4m3fnuz operator-(int a, Float8_e4m3fnuz b) {
|
| 179 |
+
return static_cast<Float8_e4m3fnuz>(a) - b;
|
| 180 |
+
}
|
| 181 |
+
inline C10_HOST_DEVICE Float8_e4m3fnuz operator*(int a, Float8_e4m3fnuz b) {
|
| 182 |
+
return static_cast<Float8_e4m3fnuz>(a) * b;
|
| 183 |
+
}
|
| 184 |
+
inline C10_HOST_DEVICE Float8_e4m3fnuz operator/(int a, Float8_e4m3fnuz b) {
|
| 185 |
+
return static_cast<Float8_e4m3fnuz>(a) / b;
|
| 186 |
+
}
|
| 187 |
+
|
| 188 |
+
//// Arithmetic with int64_t
|
| 189 |
+
|
| 190 |
+
inline C10_HOST_DEVICE Float8_e4m3fnuz operator+(Float8_e4m3fnuz a, int64_t b) {
|
| 191 |
+
return a + static_cast<Float8_e4m3fnuz>(b);
|
| 192 |
+
}
|
| 193 |
+
inline C10_HOST_DEVICE Float8_e4m3fnuz operator-(Float8_e4m3fnuz a, int64_t b) {
|
| 194 |
+
return a - static_cast<Float8_e4m3fnuz>(b);
|
| 195 |
+
}
|
| 196 |
+
inline C10_HOST_DEVICE Float8_e4m3fnuz operator*(Float8_e4m3fnuz a, int64_t b) {
|
| 197 |
+
return a * static_cast<Float8_e4m3fnuz>(b);
|
| 198 |
+
}
|
| 199 |
+
inline C10_HOST_DEVICE Float8_e4m3fnuz operator/(Float8_e4m3fnuz a, int64_t b) {
|
| 200 |
+
return a / static_cast<Float8_e4m3fnuz>(b);
|
| 201 |
+
}
|
| 202 |
+
|
| 203 |
+
inline C10_HOST_DEVICE Float8_e4m3fnuz operator+(int64_t a, Float8_e4m3fnuz b) {
|
| 204 |
+
return static_cast<Float8_e4m3fnuz>(a) + b;
|
| 205 |
+
}
|
| 206 |
+
inline C10_HOST_DEVICE Float8_e4m3fnuz operator-(int64_t a, Float8_e4m3fnuz b) {
|
| 207 |
+
return static_cast<Float8_e4m3fnuz>(a) - b;
|
| 208 |
+
}
|
| 209 |
+
inline C10_HOST_DEVICE Float8_e4m3fnuz operator*(int64_t a, Float8_e4m3fnuz b) {
|
| 210 |
+
return static_cast<Float8_e4m3fnuz>(a) * b;
|
| 211 |
+
}
|
| 212 |
+
inline C10_HOST_DEVICE Float8_e4m3fnuz operator/(int64_t a, Float8_e4m3fnuz b) {
|
| 213 |
+
return static_cast<Float8_e4m3fnuz>(a) / b;
|
| 214 |
+
}
|
| 215 |
+
|
| 216 |
+
/// NOTE: we do not define comparisons directly and instead rely on the implicit
|
| 217 |
+
/// conversion from c10::Float8_e4m3fnuz to float.
|
| 218 |
+
|
| 219 |
+
} // namespace c10
|
| 220 |
+
|
| 221 |
+
namespace std {
|
| 222 |
+
|
| 223 |
+
template <>
|
| 224 |
+
class numeric_limits<c10::Float8_e4m3fnuz> {
|
| 225 |
+
public:
|
| 226 |
+
static constexpr bool is_specialized = true;
|
| 227 |
+
static constexpr bool is_signed = true;
|
| 228 |
+
static constexpr bool is_integer = false;
|
| 229 |
+
static constexpr bool is_exact = false;
|
| 230 |
+
static constexpr bool has_infinity = false;
|
| 231 |
+
static constexpr bool has_quiet_NaN = true;
|
| 232 |
+
static constexpr bool has_signaling_NaN = false;
|
| 233 |
+
static constexpr auto has_denorm = true;
|
| 234 |
+
static constexpr auto has_denorm_loss = true;
|
| 235 |
+
static constexpr auto round_style = numeric_limits<float>::round_style;
|
| 236 |
+
static constexpr bool is_iec559 = false;
|
| 237 |
+
static constexpr bool is_bounded = true;
|
| 238 |
+
static constexpr bool is_modulo = false;
|
| 239 |
+
static constexpr int digits = 4;
|
| 240 |
+
static constexpr int digits10 = 0;
|
| 241 |
+
static constexpr int max_digits10 = 3;
|
| 242 |
+
static constexpr int radix = 2;
|
| 243 |
+
static constexpr int min_exponent = -6;
|
| 244 |
+
static constexpr int min_exponent10 = -1;
|
| 245 |
+
static constexpr int max_exponent = 8;
|
| 246 |
+
static constexpr int max_exponent10 = 2;
|
| 247 |
+
static constexpr auto traps = numeric_limits<float>::traps;
|
| 248 |
+
static constexpr auto tinyness_before = false;
|
| 249 |
+
|
| 250 |
+
static constexpr c10::Float8_e4m3fnuz min() {
|
| 251 |
+
return c10::Float8_e4m3fnuz(0x08, c10::Float8_e4m3fnuz::from_bits());
|
| 252 |
+
}
|
| 253 |
+
static constexpr c10::Float8_e4m3fnuz lowest() {
|
| 254 |
+
return c10::Float8_e4m3fnuz(0xFF, c10::Float8_e4m3fnuz::from_bits());
|
| 255 |
+
}
|
| 256 |
+
static constexpr c10::Float8_e4m3fnuz max() {
|
| 257 |
+
return c10::Float8_e4m3fnuz(0x7F, c10::Float8_e4m3fnuz::from_bits());
|
| 258 |
+
}
|
| 259 |
+
static constexpr c10::Float8_e4m3fnuz epsilon() {
|
| 260 |
+
return c10::Float8_e4m3fnuz(0x28, c10::Float8_e4m3fnuz::from_bits());
|
| 261 |
+
}
|
| 262 |
+
static constexpr c10::Float8_e4m3fnuz round_error() {
|
| 263 |
+
return c10::Float8_e4m3fnuz(0x38, c10::Float8_e4m3fnuz::from_bits());
|
| 264 |
+
}
|
| 265 |
+
static constexpr c10::Float8_e4m3fnuz infinity() {
|
| 266 |
+
// NaN (no infinities)
|
| 267 |
+
return c10::Float8_e4m3fnuz(0x80, c10::Float8_e4m3fnuz::from_bits());
|
| 268 |
+
}
|
| 269 |
+
static constexpr c10::Float8_e4m3fnuz quiet_NaN() {
|
| 270 |
+
return c10::Float8_e4m3fnuz(0x80, c10::Float8_e4m3fnuz::from_bits());
|
| 271 |
+
}
|
| 272 |
+
static constexpr c10::Float8_e4m3fnuz denorm_min() {
|
| 273 |
+
return c10::Float8_e4m3fnuz(0x01, c10::Float8_e4m3fnuz::from_bits());
|
| 274 |
+
}
|
| 275 |
+
};
|
| 276 |
+
|
| 277 |
+
} // namespace std
|
| 278 |
+
|
| 279 |
+
C10_CLANG_DIAGNOSTIC_POP()
|
videochat2/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2.h
ADDED
|
@@ -0,0 +1,148 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
/// Defines the Float8_e5m2 type (8-bit floating-point) including conversions
|
| 4 |
+
/// to standard C types and basic arithmetic operations. Note that arithmetic
|
| 5 |
+
/// operations are implemented by converting to floating point and
|
| 6 |
+
/// performing the operation in float32.
|
| 7 |
+
/// Binary configuration:
|
| 8 |
+
/// s eeeee mm
|
| 9 |
+
/// 1 sign bit
|
| 10 |
+
/// 5 exponent bits
|
| 11 |
+
/// 2 mantissa bits
|
| 12 |
+
/// bias = 15
|
| 13 |
+
///
|
| 14 |
+
/// Implementation based on the paper https://arxiv.org/pdf/2209.05433.pdf
|
| 15 |
+
/// and inspired by Half implementation from pytorch/c10/util/Half.h
|
| 16 |
+
|
| 17 |
+
#include <c10/util/Half.h>
|
| 18 |
+
|
| 19 |
+
namespace c10 {
|
| 20 |
+
|
| 21 |
+
namespace detail {
|
| 22 |
+
|
| 23 |
+
/*
|
| 24 |
+
* Convert a 8-bit floating-point number in fp8 E5M2 format, in bit
|
| 25 |
+
* representation, to a 32-bit floating-point number in IEEE single-precision
|
| 26 |
+
* format, in bit representation.
|
| 27 |
+
*
|
| 28 |
+
* @note The implementation doesn't use any floating-point operations.
|
| 29 |
+
*/
|
| 30 |
+
inline C10_HOST_DEVICE float fp8e5m2_to_fp32_value(uint8_t input) {
|
| 31 |
+
/*
|
| 32 |
+
* Extend the fp8 E5M2 number to 32 bits and shift to the
|
| 33 |
+
* upper part of the 32-bit word:
|
| 34 |
+
* +---+----+---+-----------------------------+
|
| 35 |
+
* | S |EEEEE|MM|0000 0000 0000 0000 0000 0000|
|
| 36 |
+
* +---+----+---+-----------------------------+
|
| 37 |
+
* Bits 31 26-30 24-25 0-23
|
| 38 |
+
*
|
| 39 |
+
* S - sign bit, E - bits of the biased exponent, M - bits of the mantissa, 0
|
| 40 |
+
* - zero bits.
|
| 41 |
+
*/
|
| 42 |
+
uint16_t half_representation = input;
|
| 43 |
+
half_representation <<= 8;
|
| 44 |
+
return fp16_ieee_to_fp32_value(half_representation);
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
/*
|
| 48 |
+
* Convert a 32-bit floating-point number in IEEE single-precision format to a
|
| 49 |
+
* 8-bit floating-point number in fp8 E5M2 format, in bit representation.
|
| 50 |
+
*/
|
| 51 |
+
inline C10_HOST_DEVICE uint8_t fp8e5m2_from_fp32_value(float f) {
|
| 52 |
+
/*
|
| 53 |
+
* Binary representation of fp32 infinity
|
| 54 |
+
* 0 11111111 00000000000000000000000
|
| 55 |
+
*/
|
| 56 |
+
constexpr uint32_t fp32_inf = UINT32_C(255) << 23;
|
| 57 |
+
|
| 58 |
+
/*
|
| 59 |
+
* Binary representation of 65536.0f, which is the first value
|
| 60 |
+
* not representable in fp8e5m2 range:
|
| 61 |
+
* 0 11111 00 - fp8e5m2
|
| 62 |
+
* 0 10001111 00000000000000000000000 - fp32
|
| 63 |
+
*/
|
| 64 |
+
constexpr uint32_t fp8_max = UINT32_C(143) << 23;
|
| 65 |
+
|
| 66 |
+
/*
|
| 67 |
+
* A mask for converting fp32 numbers lower than fp8e5m2 normal range
|
| 68 |
+
* into denorm representation
|
| 69 |
+
* magic number: ((127 - 15) + (23 - 2) + 1)
|
| 70 |
+
*/
|
| 71 |
+
constexpr uint32_t denorm_mask = UINT32_C(134) << 23;
|
| 72 |
+
|
| 73 |
+
uint32_t f_bits = fp32_to_bits(f);
|
| 74 |
+
uint8_t result = 0u;
|
| 75 |
+
|
| 76 |
+
/*
|
| 77 |
+
* Extract the sign of the input number into the high bit of the 32-bit word:
|
| 78 |
+
*
|
| 79 |
+
* +---+----------------------------------+
|
| 80 |
+
* | S |0000000 00000000 00000000 00000000|
|
| 81 |
+
* +---+----------------------------------+
|
| 82 |
+
* Bits 31 0-31
|
| 83 |
+
*/
|
| 84 |
+
const uint32_t sign = f_bits & UINT32_C(0x80000000);
|
| 85 |
+
|
| 86 |
+
/*
|
| 87 |
+
* Set sign bit to 0
|
| 88 |
+
*/
|
| 89 |
+
f_bits ^= sign;
|
| 90 |
+
|
| 91 |
+
if (f_bits >= fp8_max) {
|
| 92 |
+
// NaN - all exponent and mantissa bits set to 1
|
| 93 |
+
result = f_bits > fp32_inf ? UINT8_C(0x7F) : UINT8_C(0x7C);
|
| 94 |
+
} else {
|
| 95 |
+
if (f_bits < (UINT32_C(113) << 23)) {
|
| 96 |
+
// Input number is smaller than 2^(-14), which is the smallest
|
| 97 |
+
// fp8e5m2 normal number
|
| 98 |
+
f_bits =
|
| 99 |
+
fp32_to_bits(fp32_from_bits(f_bits) + fp32_from_bits(denorm_mask));
|
| 100 |
+
result = static_cast<uint8_t>(f_bits - denorm_mask);
|
| 101 |
+
} else {
|
| 102 |
+
// resulting mantissa is odd
|
| 103 |
+
uint32_t mant_odd = (f_bits >> 21) & 1;
|
| 104 |
+
|
| 105 |
+
// update exponent, rounding bias part 1
|
| 106 |
+
f_bits += ((uint32_t)(15 - 127) << 23) + 0xFFFFF;
|
| 107 |
+
|
| 108 |
+
// rounding bias part 2
|
| 109 |
+
f_bits += mant_odd;
|
| 110 |
+
|
| 111 |
+
// take the bits!
|
| 112 |
+
result = static_cast<uint8_t>(f_bits >> 21);
|
| 113 |
+
}
|
| 114 |
+
}
|
| 115 |
+
|
| 116 |
+
result |= static_cast<uint8_t>(sign >> 24);
|
| 117 |
+
return result;
|
| 118 |
+
}
|
| 119 |
+
|
| 120 |
+
} // namespace detail
|
| 121 |
+
|
| 122 |
+
struct alignas(1) Float8_e5m2 {
|
| 123 |
+
uint8_t x;
|
| 124 |
+
|
| 125 |
+
struct from_bits_t {};
|
| 126 |
+
C10_HOST_DEVICE static constexpr from_bits_t from_bits() {
|
| 127 |
+
return from_bits_t();
|
| 128 |
+
}
|
| 129 |
+
|
| 130 |
+
Float8_e5m2() = default;
|
| 131 |
+
|
| 132 |
+
constexpr C10_HOST_DEVICE Float8_e5m2(uint8_t bits, from_bits_t) : x(bits) {}
|
| 133 |
+
inline C10_HOST_DEVICE Float8_e5m2(float value);
|
| 134 |
+
inline C10_HOST_DEVICE operator float() const;
|
| 135 |
+
inline C10_HOST_DEVICE bool isnan() const;
|
| 136 |
+
inline C10_HOST_DEVICE bool isinf() const;
|
| 137 |
+
};
|
| 138 |
+
|
| 139 |
+
C10_API inline std::ostream& operator<<(
|
| 140 |
+
std::ostream& out,
|
| 141 |
+
const Float8_e5m2& value) {
|
| 142 |
+
out << (float)value;
|
| 143 |
+
return out;
|
| 144 |
+
}
|
| 145 |
+
|
| 146 |
+
} // namespace c10
|
| 147 |
+
|
| 148 |
+
#include <c10/util/Float8_e5m2-inl.h> // IWYU pragma: keep
|
videochat2/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2fnuz.h
ADDED
|
@@ -0,0 +1,138 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
/// Defines the Float8_e5m2fnuz type (8-bit floating-point) including
|
| 4 |
+
/// conversions to standard C types and basic arithmetic operations. Note that
|
| 5 |
+
/// arithmetic operations are implemented by converting to floating point and
|
| 6 |
+
/// performing the operation in float32.
|
| 7 |
+
/// Binary configuration remains the same as e5m2:
|
| 8 |
+
/// s eeeee mm
|
| 9 |
+
/// 1 sign bit
|
| 10 |
+
/// 5 exponent bits
|
| 11 |
+
/// 2 mantissa bits
|
| 12 |
+
/// The key differences that e5m2fnuz brings are:
|
| 13 |
+
/// bias = 16
|
| 14 |
+
/// no infinities or negative zero
|
| 15 |
+
/// NaN only when sign bit is 1, rest all 0s
|
| 16 |
+
///
|
| 17 |
+
/// Implementation based on the paper https://arxiv.org/pdf/2206.02915.pdf and
|
| 18 |
+
/// the existing Float8_e4m3fn implementation.
|
| 19 |
+
|
| 20 |
+
#include <c10/macros/Macros.h>
|
| 21 |
+
#include <c10/util/TypeSafeSignMath.h>
|
| 22 |
+
#include <c10/util/floating_point_utils.h>
|
| 23 |
+
|
| 24 |
+
#if defined(__cplusplus)
|
| 25 |
+
#include <cstdint>
|
| 26 |
+
#elif !defined(__OPENCL_VERSION__)
|
| 27 |
+
#include <math.h>
|
| 28 |
+
#include <stdint.h>
|
| 29 |
+
#endif
|
| 30 |
+
|
| 31 |
+
#include <iosfwd>
|
| 32 |
+
#include <ostream>
|
| 33 |
+
|
| 34 |
+
namespace c10 {
|
| 35 |
+
|
| 36 |
+
namespace detail {
|
| 37 |
+
|
| 38 |
+
/*
|
| 39 |
+
* Convert a 32-bit floating-point number in IEEE single-precision format to a
|
| 40 |
+
* 8-bit floating-point number in fp8 E5M2 format, in bit representation.
|
| 41 |
+
*/
|
| 42 |
+
inline C10_HOST_DEVICE uint8_t fp8e5m2fnuz_from_fp32_value(float f) {
|
| 43 |
+
/*
|
| 44 |
+
* Binary representation of 65536.0f, which is the first value not
|
| 45 |
+
* representable (i.e. the first value which would overflow in to the sign
|
| 46 |
+
* bit, resulting in a NaN) in fp8e4m3fnuz range:
|
| 47 |
+
* 1 00000 00 - fp8e5m2fnuz
|
| 48 |
+
* 0 10001111 00000000000000000000000 - fp32
|
| 49 |
+
*/
|
| 50 |
+
constexpr uint32_t fnuz_max = UINT32_C(0x8F) << 23;
|
| 51 |
+
|
| 52 |
+
/*
|
| 53 |
+
* A mask for converting fp32 numbers lower than fp8e5m2fnuz normal range
|
| 54 |
+
* into denormalized representation.
|
| 55 |
+
* magic number: ((127 - 16) + (23 - 2) + 1)
|
| 56 |
+
*/
|
| 57 |
+
constexpr uint32_t denorm_mask = UINT32_C(0x85) << 23;
|
| 58 |
+
|
| 59 |
+
uint32_t f_bits = fp32_to_bits(f);
|
| 60 |
+
uint32_t result = 0u;
|
| 61 |
+
|
| 62 |
+
/*
|
| 63 |
+
* Extract the sign of the input number into the high bit of the 32-bit word:
|
| 64 |
+
*
|
| 65 |
+
* +---+----------------------------------+
|
| 66 |
+
* | S |0000000 00000000 00000000 00000000|
|
| 67 |
+
* +---+----------------------------------+
|
| 68 |
+
* Bits 31 0-31
|
| 69 |
+
*/
|
| 70 |
+
const uint32_t sign = f_bits & UINT32_C(0x80000000);
|
| 71 |
+
|
| 72 |
+
/*
|
| 73 |
+
* Set sign bit to 0
|
| 74 |
+
*/
|
| 75 |
+
f_bits ^= sign;
|
| 76 |
+
|
| 77 |
+
if (f_bits >= fnuz_max) {
|
| 78 |
+
// NaN -- sign bit set to 1, rest 0s
|
| 79 |
+
return 0x80;
|
| 80 |
+
}
|
| 81 |
+
|
| 82 |
+
if (f_bits < (UINT32_C(0x70) << 23) /* 2^-15 in float32 */) {
|
| 83 |
+
// Input exponent is less than -15, the smallest e5m2fnuz exponent, so the
|
| 84 |
+
// number will become subnormal.
|
| 85 |
+
f_bits = fp32_to_bits(fp32_from_bits(f_bits) + fp32_from_bits(denorm_mask));
|
| 86 |
+
result = static_cast<uint8_t>(f_bits - denorm_mask);
|
| 87 |
+
if (result == 0) {
|
| 88 |
+
// fnuz types don't have negative zero.
|
| 89 |
+
return 0;
|
| 90 |
+
}
|
| 91 |
+
} else {
|
| 92 |
+
// resulting mantissa is odd
|
| 93 |
+
uint8_t mant_odd = (f_bits >> 21) & 1;
|
| 94 |
+
|
| 95 |
+
// update exponent, rounding bias part 1
|
| 96 |
+
f_bits += ((uint32_t)(16 - 127) << 23) + 0xFFFFF;
|
| 97 |
+
|
| 98 |
+
// rounding bias part 2
|
| 99 |
+
f_bits += mant_odd;
|
| 100 |
+
|
| 101 |
+
// take the bits!
|
| 102 |
+
result = static_cast<uint8_t>(f_bits >> 21);
|
| 103 |
+
}
|
| 104 |
+
|
| 105 |
+
result |= sign >> 24;
|
| 106 |
+
return result;
|
| 107 |
+
}
|
| 108 |
+
|
| 109 |
+
} // namespace detail
|
| 110 |
+
|
| 111 |
+
struct alignas(1) Float8_e5m2fnuz {
|
| 112 |
+
uint8_t x;
|
| 113 |
+
|
| 114 |
+
struct from_bits_t {};
|
| 115 |
+
C10_HOST_DEVICE static constexpr from_bits_t from_bits() {
|
| 116 |
+
return from_bits_t();
|
| 117 |
+
}
|
| 118 |
+
|
| 119 |
+
Float8_e5m2fnuz() = default;
|
| 120 |
+
|
| 121 |
+
constexpr C10_HOST_DEVICE Float8_e5m2fnuz(uint8_t bits, from_bits_t)
|
| 122 |
+
: x(bits) {}
|
| 123 |
+
inline C10_HOST_DEVICE Float8_e5m2fnuz(float value);
|
| 124 |
+
inline C10_HOST_DEVICE operator float() const;
|
| 125 |
+
inline C10_HOST_DEVICE bool isnan() const;
|
| 126 |
+
inline C10_HOST_DEVICE bool isinf() const;
|
| 127 |
+
};
|
| 128 |
+
|
| 129 |
+
C10_API inline std::ostream& operator<<(
|
| 130 |
+
std::ostream& out,
|
| 131 |
+
const Float8_e5m2fnuz& value) {
|
| 132 |
+
out << (float)value;
|
| 133 |
+
return out;
|
| 134 |
+
}
|
| 135 |
+
|
| 136 |
+
} // namespace c10
|
| 137 |
+
|
| 138 |
+
#include <c10/util/Float8_e5m2fnuz-inl.h> // IWYU pragma: keep
|
videochat2/lib/python3.10/site-packages/torch/include/c10/util/Gauge.h
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <memory>
|
| 4 |
+
#include <string_view>
|
| 5 |
+
|
| 6 |
+
#include <c10/macros/Macros.h>
|
| 7 |
+
#include <c10/util/SmallVector.h>
|
| 8 |
+
|
| 9 |
+
namespace c10::monitor {
|
| 10 |
+
namespace detail {
|
| 11 |
+
|
| 12 |
+
class GaugeImpl;
|
| 13 |
+
|
| 14 |
+
class GaugeBackendIf {
|
| 15 |
+
public:
|
| 16 |
+
virtual ~GaugeBackendIf() = default;
|
| 17 |
+
virtual void record(int64_t value) noexcept = 0;
|
| 18 |
+
};
|
| 19 |
+
|
| 20 |
+
class GaugeBackendFactoryIf {
|
| 21 |
+
public:
|
| 22 |
+
virtual ~GaugeBackendFactoryIf() = default;
|
| 23 |
+
|
| 24 |
+
// May return nullptr if the gauge will be ignored by the given backend.
|
| 25 |
+
virtual std::unique_ptr<GaugeBackendIf> create(
|
| 26 |
+
std::string_view key) noexcept = 0;
|
| 27 |
+
};
|
| 28 |
+
|
| 29 |
+
void C10_API registerGaugeBackend(std::unique_ptr<GaugeBackendFactoryIf>);
|
| 30 |
+
} // namespace detail
|
| 31 |
+
|
| 32 |
+
// A handle to a Gauge.
|
| 33 |
+
class C10_API GaugeHandle {
|
| 34 |
+
public:
|
| 35 |
+
explicit GaugeHandle(std::string_view key);
|
| 36 |
+
void record(int64_t value);
|
| 37 |
+
|
| 38 |
+
private:
|
| 39 |
+
detail::GaugeImpl& impl_;
|
| 40 |
+
};
|
| 41 |
+
|
| 42 |
+
} // namespace c10::monitor
|
| 43 |
+
|
| 44 |
+
#define STATIC_GAUGE(_key) \
|
| 45 |
+
[]() -> ::c10::monitor::GaugeHandle& { \
|
| 46 |
+
static ::c10::monitor::GaugeHandle handle(#_key); \
|
| 47 |
+
return handle; \
|
| 48 |
+
}()
|
videochat2/lib/python3.10/site-packages/torch/include/c10/util/Half.h
ADDED
|
@@ -0,0 +1,535 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
/// Defines the Half type (half-precision floating-point) including conversions
|
| 4 |
+
/// to standard C types and basic arithmetic operations. Note that arithmetic
|
| 5 |
+
/// operations are implemented by converting to floating point and
|
| 6 |
+
/// performing the operation in float32, instead of using CUDA half intrinsics.
|
| 7 |
+
/// Most uses of this type within ATen are memory bound, including the
|
| 8 |
+
/// element-wise kernels, and the half intrinsics aren't efficient on all GPUs.
|
| 9 |
+
/// If you are writing a compute bound kernel, you can use the CUDA half
|
| 10 |
+
/// intrinsics directly on the Half type from device code.
|
| 11 |
+
|
| 12 |
+
#include <c10/macros/Export.h>
|
| 13 |
+
#include <c10/macros/Macros.h>
|
| 14 |
+
#include <c10/util/TypeSafeSignMath.h>
|
| 15 |
+
#include <c10/util/bit_cast.h>
|
| 16 |
+
#include <c10/util/complex.h>
|
| 17 |
+
#include <c10/util/floating_point_utils.h>
|
| 18 |
+
#include <type_traits>
|
| 19 |
+
|
| 20 |
+
#if defined(__cplusplus)
|
| 21 |
+
#include <cmath>
|
| 22 |
+
#elif !defined(__OPENCL_VERSION__)
|
| 23 |
+
#include <math.h>
|
| 24 |
+
#endif
|
| 25 |
+
|
| 26 |
+
#ifdef _MSC_VER
|
| 27 |
+
#include <intrin.h>
|
| 28 |
+
#endif
|
| 29 |
+
|
| 30 |
+
#include <cstdint>
|
| 31 |
+
#include <cstring>
|
| 32 |
+
#include <iosfwd>
|
| 33 |
+
#include <limits>
|
| 34 |
+
#include <ostream>
|
| 35 |
+
|
| 36 |
+
#ifdef __CUDACC__
|
| 37 |
+
#include <cuda_fp16.h>
|
| 38 |
+
#endif
|
| 39 |
+
|
| 40 |
+
#ifdef __HIPCC__
|
| 41 |
+
#include <hip/hip_fp16.h>
|
| 42 |
+
#endif
|
| 43 |
+
|
| 44 |
+
#if defined(CL_SYCL_LANGUAGE_VERSION)
|
| 45 |
+
#include <CL/sycl.hpp> // for SYCL 1.2.1
|
| 46 |
+
#elif defined(SYCL_LANGUAGE_VERSION)
|
| 47 |
+
#include <sycl/sycl.hpp> // for SYCL 2020
|
| 48 |
+
#endif
|
| 49 |
+
|
| 50 |
+
#if defined(__aarch64__) && !defined(__CUDACC__)
|
| 51 |
+
#include <arm_neon.h>
|
| 52 |
+
#endif
|
| 53 |
+
|
| 54 |
+
namespace c10 {
|
| 55 |
+
|
| 56 |
+
namespace detail {
|
| 57 |
+
|
| 58 |
+
/*
|
| 59 |
+
* Convert a 16-bit floating-point number in IEEE half-precision format, in bit
|
| 60 |
+
* representation, to a 32-bit floating-point number in IEEE single-precision
|
| 61 |
+
* format, in bit representation.
|
| 62 |
+
*
|
| 63 |
+
* @note The implementation doesn't use any floating-point operations.
|
| 64 |
+
*/
|
| 65 |
+
inline uint32_t fp16_ieee_to_fp32_bits(uint16_t h) {
|
| 66 |
+
/*
|
| 67 |
+
* Extend the half-precision floating-point number to 32 bits and shift to the
|
| 68 |
+
* upper part of the 32-bit word:
|
| 69 |
+
* +---+-----+------------+-------------------+
|
| 70 |
+
* | S |EEEEE|MM MMMM MMMM|0000 0000 0000 0000|
|
| 71 |
+
* +---+-----+------------+-------------------+
|
| 72 |
+
* Bits 31 26-30 16-25 0-15
|
| 73 |
+
*
|
| 74 |
+
* S - sign bit, E - bits of the biased exponent, M - bits of the mantissa, 0
|
| 75 |
+
* - zero bits.
|
| 76 |
+
*/
|
| 77 |
+
const uint32_t w = (uint32_t)h << 16;
|
| 78 |
+
/*
|
| 79 |
+
* Extract the sign of the input number into the high bit of the 32-bit word:
|
| 80 |
+
*
|
| 81 |
+
* +---+----------------------------------+
|
| 82 |
+
* | S |0000000 00000000 00000000 00000000|
|
| 83 |
+
* +---+----------------------------------+
|
| 84 |
+
* Bits 31 0-31
|
| 85 |
+
*/
|
| 86 |
+
const uint32_t sign = w & UINT32_C(0x80000000);
|
| 87 |
+
/*
|
| 88 |
+
* Extract mantissa and biased exponent of the input number into the bits 0-30
|
| 89 |
+
* of the 32-bit word:
|
| 90 |
+
*
|
| 91 |
+
* +---+-----+------------+-------------------+
|
| 92 |
+
* | 0 |EEEEE|MM MMMM MMMM|0000 0000 0000 0000|
|
| 93 |
+
* +---+-----+------------+-------------------+
|
| 94 |
+
* Bits 30 27-31 17-26 0-16
|
| 95 |
+
*/
|
| 96 |
+
const uint32_t nonsign = w & UINT32_C(0x7FFFFFFF);
|
| 97 |
+
/*
|
| 98 |
+
* Renorm shift is the number of bits to shift mantissa left to make the
|
| 99 |
+
* half-precision number normalized. If the initial number is normalized, some
|
| 100 |
+
* of its high 6 bits (sign == 0 and 5-bit exponent) equals one. In this case
|
| 101 |
+
* renorm_shift == 0. If the number is denormalize, renorm_shift > 0. Note
|
| 102 |
+
* that if we shift denormalized nonsign by renorm_shift, the unit bit of
|
| 103 |
+
* mantissa will shift into exponent, turning the biased exponent into 1, and
|
| 104 |
+
* making mantissa normalized (i.e. without leading 1).
|
| 105 |
+
*/
|
| 106 |
+
#ifdef _MSC_VER
|
| 107 |
+
unsigned long nonsign_bsr;
|
| 108 |
+
_BitScanReverse(&nonsign_bsr, (unsigned long)nonsign);
|
| 109 |
+
uint32_t renorm_shift = (uint32_t)nonsign_bsr ^ 31;
|
| 110 |
+
#else
|
| 111 |
+
uint32_t renorm_shift = __builtin_clz(nonsign);
|
| 112 |
+
#endif
|
| 113 |
+
renorm_shift = renorm_shift > 5 ? renorm_shift - 5 : 0;
|
| 114 |
+
/*
|
| 115 |
+
* Iff half-precision number has exponent of 15, the addition overflows
|
| 116 |
+
* it into bit 31, and the subsequent shift turns the high 9 bits
|
| 117 |
+
* into 1. Thus inf_nan_mask == 0x7F800000 if the half-precision number
|
| 118 |
+
* had exponent of 15 (i.e. was NaN or infinity) 0x00000000 otherwise
|
| 119 |
+
*/
|
| 120 |
+
const int32_t inf_nan_mask =
|
| 121 |
+
((int32_t)(nonsign + 0x04000000) >> 8) & INT32_C(0x7F800000);
|
| 122 |
+
/*
|
| 123 |
+
* Iff nonsign is 0, it overflows into 0xFFFFFFFF, turning bit 31
|
| 124 |
+
* into 1. Otherwise, bit 31 remains 0. The signed shift right by 31
|
| 125 |
+
* broadcasts bit 31 into all bits of the zero_mask. Thus zero_mask ==
|
| 126 |
+
* 0xFFFFFFFF if the half-precision number was zero (+0.0h or -0.0h)
|
| 127 |
+
* 0x00000000 otherwise
|
| 128 |
+
*/
|
| 129 |
+
const int32_t zero_mask = (int32_t)(nonsign - 1) >> 31;
|
| 130 |
+
/*
|
| 131 |
+
* 1. Shift nonsign left by renorm_shift to normalize it (if the input
|
| 132 |
+
* was denormal)
|
| 133 |
+
* 2. Shift nonsign right by 3 so the exponent (5 bits originally)
|
| 134 |
+
* becomes an 8-bit field and 10-bit mantissa shifts into the 10 high
|
| 135 |
+
* bits of the 23-bit mantissa of IEEE single-precision number.
|
| 136 |
+
* 3. Add 0x70 to the exponent (starting at bit 23) to compensate the
|
| 137 |
+
* different in exponent bias (0x7F for single-precision number less 0xF
|
| 138 |
+
* for half-precision number).
|
| 139 |
+
* 4. Subtract renorm_shift from the exponent (starting at bit 23) to
|
| 140 |
+
* account for renormalization. As renorm_shift is less than 0x70, this
|
| 141 |
+
* can be combined with step 3.
|
| 142 |
+
* 5. Binary OR with inf_nan_mask to turn the exponent into 0xFF if the
|
| 143 |
+
* input was NaN or infinity.
|
| 144 |
+
* 6. Binary ANDNOT with zero_mask to turn the mantissa and exponent
|
| 145 |
+
* into zero if the input was zero.
|
| 146 |
+
* 7. Combine with the sign of the input number.
|
| 147 |
+
*/
|
| 148 |
+
return sign |
|
| 149 |
+
((((nonsign << renorm_shift >> 3) + ((0x70 - renorm_shift) << 23)) |
|
| 150 |
+
inf_nan_mask) &
|
| 151 |
+
~zero_mask);
|
| 152 |
+
}
|
| 153 |
+
|
| 154 |
+
/*
|
| 155 |
+
* Convert a 16-bit floating-point number in IEEE half-precision format, in bit
|
| 156 |
+
* representation, to a 32-bit floating-point number in IEEE single-precision
|
| 157 |
+
* format.
|
| 158 |
+
*
|
| 159 |
+
* @note The implementation relies on IEEE-like (no assumption about rounding
|
| 160 |
+
* mode and no operations on denormals) floating-point operations and bitcasts
|
| 161 |
+
* between integer and floating-point variables.
|
| 162 |
+
*/
|
| 163 |
+
C10_HOST_DEVICE inline float fp16_ieee_to_fp32_value(uint16_t h) {
|
| 164 |
+
/*
|
| 165 |
+
* Extend the half-precision floating-point number to 32 bits and shift to the
|
| 166 |
+
* upper part of the 32-bit word:
|
| 167 |
+
* +---+-----+------------+-------------------+
|
| 168 |
+
* | S |EEEEE|MM MMMM MMMM|0000 0000 0000 0000|
|
| 169 |
+
* +---+-----+------------+-------------------+
|
| 170 |
+
* Bits 31 26-30 16-25 0-15
|
| 171 |
+
*
|
| 172 |
+
* S - sign bit, E - bits of the biased exponent, M - bits of the mantissa, 0
|
| 173 |
+
* - zero bits.
|
| 174 |
+
*/
|
| 175 |
+
const uint32_t w = (uint32_t)h << 16;
|
| 176 |
+
/*
|
| 177 |
+
* Extract the sign of the input number into the high bit of the 32-bit word:
|
| 178 |
+
*
|
| 179 |
+
* +---+----------------------------------+
|
| 180 |
+
* | S |0000000 00000000 00000000 00000000|
|
| 181 |
+
* +---+----------------------------------+
|
| 182 |
+
* Bits 31 0-31
|
| 183 |
+
*/
|
| 184 |
+
const uint32_t sign = w & UINT32_C(0x80000000);
|
| 185 |
+
/*
|
| 186 |
+
* Extract mantissa and biased exponent of the input number into the high bits
|
| 187 |
+
* of the 32-bit word:
|
| 188 |
+
*
|
| 189 |
+
* +-----+------------+---------------------+
|
| 190 |
+
* |EEEEE|MM MMMM MMMM|0 0000 0000 0000 0000|
|
| 191 |
+
* +-----+------------+---------------------+
|
| 192 |
+
* Bits 27-31 17-26 0-16
|
| 193 |
+
*/
|
| 194 |
+
const uint32_t two_w = w + w;
|
| 195 |
+
|
| 196 |
+
/*
|
| 197 |
+
* Shift mantissa and exponent into bits 23-28 and bits 13-22 so they become
|
| 198 |
+
* mantissa and exponent of a single-precision floating-point number:
|
| 199 |
+
*
|
| 200 |
+
* S|Exponent | Mantissa
|
| 201 |
+
* +-+---+-----+------------+----------------+
|
| 202 |
+
* |0|000|EEEEE|MM MMMM MMMM|0 0000 0000 0000|
|
| 203 |
+
* +-+---+-----+------------+----------------+
|
| 204 |
+
* Bits | 23-31 | 0-22
|
| 205 |
+
*
|
| 206 |
+
* Next, there are some adjustments to the exponent:
|
| 207 |
+
* - The exponent needs to be corrected by the difference in exponent bias
|
| 208 |
+
* between single-precision and half-precision formats (0x7F - 0xF = 0x70)
|
| 209 |
+
* - Inf and NaN values in the inputs should become Inf and NaN values after
|
| 210 |
+
* conversion to the single-precision number. Therefore, if the biased
|
| 211 |
+
* exponent of the half-precision input was 0x1F (max possible value), the
|
| 212 |
+
* biased exponent of the single-precision output must be 0xFF (max possible
|
| 213 |
+
* value). We do this correction in two steps:
|
| 214 |
+
* - First, we adjust the exponent by (0xFF - 0x1F) = 0xE0 (see exp_offset
|
| 215 |
+
* below) rather than by 0x70 suggested by the difference in the exponent bias
|
| 216 |
+
* (see above).
|
| 217 |
+
* - Then we multiply the single-precision result of exponent adjustment by
|
| 218 |
+
* 2**(-112) to reverse the effect of exponent adjustment by 0xE0 less the
|
| 219 |
+
* necessary exponent adjustment by 0x70 due to difference in exponent bias.
|
| 220 |
+
* The floating-point multiplication hardware would ensure than Inf and
|
| 221 |
+
* NaN would retain their value on at least partially IEEE754-compliant
|
| 222 |
+
* implementations.
|
| 223 |
+
*
|
| 224 |
+
* Note that the above operations do not handle denormal inputs (where biased
|
| 225 |
+
* exponent == 0). However, they also do not operate on denormal inputs, and
|
| 226 |
+
* do not produce denormal results.
|
| 227 |
+
*/
|
| 228 |
+
constexpr uint32_t exp_offset = UINT32_C(0xE0) << 23;
|
| 229 |
+
// const float exp_scale = 0x1.0p-112f;
|
| 230 |
+
constexpr uint32_t scale_bits = (uint32_t)15 << 23;
|
| 231 |
+
float exp_scale_val = 0;
|
| 232 |
+
std::memcpy(&exp_scale_val, &scale_bits, sizeof(exp_scale_val));
|
| 233 |
+
const float exp_scale = exp_scale_val;
|
| 234 |
+
const float normalized_value =
|
| 235 |
+
fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;
|
| 236 |
+
|
| 237 |
+
/*
|
| 238 |
+
* Convert denormalized half-precision inputs into single-precision results
|
| 239 |
+
* (always normalized). Zero inputs are also handled here.
|
| 240 |
+
*
|
| 241 |
+
* In a denormalized number the biased exponent is zero, and mantissa has
|
| 242 |
+
* on-zero bits. First, we shift mantissa into bits 0-9 of the 32-bit word.
|
| 243 |
+
*
|
| 244 |
+
* zeros | mantissa
|
| 245 |
+
* +---------------------------+------------+
|
| 246 |
+
* |0000 0000 0000 0000 0000 00|MM MMMM MMMM|
|
| 247 |
+
* +---------------------------+------------+
|
| 248 |
+
* Bits 10-31 0-9
|
| 249 |
+
*
|
| 250 |
+
* Now, remember that denormalized half-precision numbers are represented as:
|
| 251 |
+
* FP16 = mantissa * 2**(-24).
|
| 252 |
+
* The trick is to construct a normalized single-precision number with the
|
| 253 |
+
* same mantissa and thehalf-precision input and with an exponent which would
|
| 254 |
+
* scale the corresponding mantissa bits to 2**(-24). A normalized
|
| 255 |
+
* single-precision floating-point number is represented as: FP32 = (1 +
|
| 256 |
+
* mantissa * 2**(-23)) * 2**(exponent - 127) Therefore, when the biased
|
| 257 |
+
* exponent is 126, a unit change in the mantissa of the input denormalized
|
| 258 |
+
* half-precision number causes a change of the constructed single-precision
|
| 259 |
+
* number by 2**(-24), i.e. the same amount.
|
| 260 |
+
*
|
| 261 |
+
* The last step is to adjust the bias of the constructed single-precision
|
| 262 |
+
* number. When the input half-precision number is zero, the constructed
|
| 263 |
+
* single-precision number has the value of FP32 = 1 * 2**(126 - 127) =
|
| 264 |
+
* 2**(-1) = 0.5 Therefore, we need to subtract 0.5 from the constructed
|
| 265 |
+
* single-precision number to get the numerical equivalent of the input
|
| 266 |
+
* half-precision number.
|
| 267 |
+
*/
|
| 268 |
+
constexpr uint32_t magic_mask = UINT32_C(126) << 23;
|
| 269 |
+
constexpr float magic_bias = 0.5f;
|
| 270 |
+
const float denormalized_value =
|
| 271 |
+
fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias;
|
| 272 |
+
|
| 273 |
+
/*
|
| 274 |
+
* - Choose either results of conversion of input as a normalized number, or
|
| 275 |
+
* as a denormalized number, depending on the input exponent. The variable
|
| 276 |
+
* two_w contains input exponent in bits 27-31, therefore if its smaller than
|
| 277 |
+
* 2**27, the input is either a denormal number, or zero.
|
| 278 |
+
* - Combine the result of conversion of exponent and mantissa with the sign
|
| 279 |
+
* of the input number.
|
| 280 |
+
*/
|
| 281 |
+
constexpr uint32_t denormalized_cutoff = UINT32_C(1) << 27;
|
| 282 |
+
const uint32_t result = sign |
|
| 283 |
+
(two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value)
|
| 284 |
+
: fp32_to_bits(normalized_value));
|
| 285 |
+
return fp32_from_bits(result);
|
| 286 |
+
}
|
| 287 |
+
|
| 288 |
+
/*
 * Convert a 32-bit floating-point number in IEEE single-precision format to a
 * 16-bit floating-point number in IEEE half-precision format, in bit
 * representation.
 *
 * @note The implementation relies on IEEE-like (no assumption about rounding
 * mode and no operations on denormals) floating-point operations and bitcasts
 * between integer and floating-point variables.
 */
inline uint16_t fp16_ieee_from_fp32_value(float f) {
  // Bit patterns of the two scale constants, spelled out as integer bit
  // patterns (the hex-float literals below are kept for reference):
  // const float scale_to_inf = 0x1.0p+112f;
  // const float scale_to_zero = 0x1.0p-110f;
  constexpr uint32_t scale_to_inf_bits = (uint32_t)239 << 23;
  constexpr uint32_t scale_to_zero_bits = (uint32_t)17 << 23;
  float scale_to_inf_val = 0, scale_to_zero_val = 0;
  // memcpy is the portable bit-cast (no strict-aliasing violation).
  std::memcpy(&scale_to_inf_val, &scale_to_inf_bits, sizeof(scale_to_inf_val));
  std::memcpy(
      &scale_to_zero_val, &scale_to_zero_bits, sizeof(scale_to_zero_val));
  const float scale_to_inf = scale_to_inf_val;
  const float scale_to_zero = scale_to_zero_val;

#if defined(_MSC_VER) && _MSC_VER == 1916
  // NOTE(review): the signbit-based |f| is gated to exactly MSVC 19.16 —
  // presumably a workaround for a fabsf issue in that compiler; confirm
  // against upstream history before touching.
  float base = ((signbit(f) != 0 ? -f : f) * scale_to_inf) * scale_to_zero;
#else
  float base = (fabsf(f) * scale_to_inf) * scale_to_zero;
#endif

  const uint32_t w = fp32_to_bits(f);
  // Shift left by one: drops the sign bit, leaving the biased exponent in the
  // top 8 bits of shl1_w.
  const uint32_t shl1_w = w + w;
  const uint32_t sign = w & UINT32_C(0x80000000);
  uint32_t bias = shl1_w & UINT32_C(0xFF000000);
  if (bias < UINT32_C(0x71000000)) {
    // Clamp the exponent so that very small inputs round through the
    // subnormal fp16 range rather than underflowing incorrectly.
    bias = UINT32_C(0x71000000);
  }

  // Adding the biased value to `base` aligns the fp16 result's mantissa and
  // performs round-to-nearest-even via the fp32 addition itself.
  base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base;
  const uint32_t bits = fp32_to_bits(base);
  const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00);
  const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF);
  const uint32_t nonsign = exp_bits + mantissa_bits;
  // shl1_w > 0xFF000000 means exponent all-ones with non-zero mantissa, i.e.
  // NaN input: return the canonical fp16 NaN 0x7E00. Infinities satisfy
  // shl1_w == 0xFF000000 and flow through the normal path to 0x7C00.
  return static_cast<uint16_t>(
      (sign >> 16) |
      (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign));
}
|
| 332 |
+
|
| 333 |
+
#if defined(__aarch64__) && !defined(__CUDACC__)
|
| 334 |
+
inline float16_t fp16_from_bits(uint16_t h) {
|
| 335 |
+
return c10::bit_cast<float16_t>(h);
|
| 336 |
+
}
|
| 337 |
+
|
| 338 |
+
inline uint16_t fp16_to_bits(float16_t f) {
|
| 339 |
+
return c10::bit_cast<uint16_t>(f);
|
| 340 |
+
}
|
| 341 |
+
|
| 342 |
+
// According to https://godbolt.org/z/frExdbsWG it would translate to single
|
| 343 |
+
// fcvt s0, h0
|
| 344 |
+
inline float native_fp16_to_fp32_value(uint16_t h) {
|
| 345 |
+
return static_cast<float>(fp16_from_bits(h));
|
| 346 |
+
}
|
| 347 |
+
|
| 348 |
+
inline uint16_t native_fp16_from_fp32_value(float f) {
|
| 349 |
+
return fp16_to_bits(static_cast<float16_t>(f));
|
| 350 |
+
}
|
| 351 |
+
#endif
|
| 352 |
+
|
| 353 |
+
} // namespace detail
|
| 354 |
+
|
| 355 |
+
/// 16-bit IEEE half-precision floating-point storage type.
/// Holds only the raw bit pattern (`x`); conversions to/from float and the
/// vendor half types are declared here and defined out of line (see the
/// Half-inl.h include at the end of this header).
struct alignas(2) Half {
  unsigned short x; // raw IEEE fp16 bit pattern

  // Tag type enabling bit-pattern construction without a float conversion:
  //   Half h(bits, Half::from_bits());
  struct from_bits_t {};
  C10_HOST_DEVICE static constexpr from_bits_t from_bits() {
    return from_bits_t();
  }

  // HIP wants __host__ __device__ tag, CUDA does not
#if defined(USE_ROCM)
  C10_HOST_DEVICE Half() = default;
#else
  Half() = default;
#endif

  constexpr C10_HOST_DEVICE Half(unsigned short bits, from_bits_t) : x(bits) {}
#if defined(__aarch64__) && !defined(__CUDACC__)
  // On aarch64, convert through the native float16_t type.
  inline Half(float16_t value);
  inline operator float16_t() const;
#else
  inline C10_HOST_DEVICE Half(float value);
  inline C10_HOST_DEVICE operator float() const;
#endif

#if defined(__CUDACC__) || defined(__HIPCC__)
  // Interop with the CUDA/HIP built-in __half type.
  inline C10_HOST_DEVICE Half(const __half& value);
  inline C10_HOST_DEVICE operator __half() const;
#endif
#ifdef SYCL_LANGUAGE_VERSION
  // Interop with the SYCL half type.
  inline C10_HOST_DEVICE Half(const sycl::half& value);
  inline C10_HOST_DEVICE operator sycl::half() const;
#endif
};
|
| 388 |
+
|
| 389 |
+
// TODO : move to complex.h
/// Specialization of c10::complex for Half: stores the real and imaginary
/// parts as fp16, but performs compound arithmetic in float and rounds back
/// to fp16 on assignment.
template <>
struct alignas(4) complex<Half> {
  Half real_;
  Half imag_;

  // Constructors
  complex() = default;
  // Half constructor is not constexpr so the following constructor can't
  // be constexpr
  C10_HOST_DEVICE explicit inline complex(const Half& real, const Half& imag)
      : real_(real), imag_(imag) {}
  // Implicit narrowing conversion from complex<float> (each component is
  // rounded to fp16).
  C10_HOST_DEVICE inline complex(const c10::complex<float>& value)
      : real_(value.real()), imag_(value.imag()) {}

  // Conversion operator: widen both components back to float.
  inline C10_HOST_DEVICE operator c10::complex<float>() const {
    return {real_, imag_};
  }

  constexpr C10_HOST_DEVICE Half real() const {
    return real_;
  }
  constexpr C10_HOST_DEVICE Half imag() const {
    return imag_;
  }

  // Componentwise addition computed in float.
  C10_HOST_DEVICE complex<Half>& operator+=(const complex<Half>& other) {
    real_ = static_cast<float>(real_) + static_cast<float>(other.real_);
    imag_ = static_cast<float>(imag_) + static_cast<float>(other.imag_);
    return *this;
  }

  // Componentwise subtraction computed in float.
  C10_HOST_DEVICE complex<Half>& operator-=(const complex<Half>& other) {
    real_ = static_cast<float>(real_) - static_cast<float>(other.real_);
    imag_ = static_cast<float>(imag_) - static_cast<float>(other.imag_);
    return *this;
  }

  // Standard complex multiply in float: (a+bi)(c+di) = (ac-bd) + (ad+bc)i.
  C10_HOST_DEVICE complex<Half>& operator*=(const complex<Half>& other) {
    auto a = static_cast<float>(real_);
    auto b = static_cast<float>(imag_);
    auto c = static_cast<float>(other.real());
    auto d = static_cast<float>(other.imag());
    real_ = a * c - b * d;
    imag_ = a * d + b * c;
    return *this;
  }
};
|
| 438 |
+
|
| 439 |
+
// In some versions of MSVC, there will be a compiler error when building.
|
| 440 |
+
// C4146: unary minus operator applied to unsigned type, result still unsigned
|
| 441 |
+
// C4804: unsafe use of type 'bool' in operation
|
| 442 |
+
// It can be addressed by disabling the following warning.
|
| 443 |
+
#ifdef _MSC_VER
|
| 444 |
+
#pragma warning(push)
|
| 445 |
+
#pragma warning(disable : 4146)
|
| 446 |
+
#pragma warning(disable : 4804)
|
| 447 |
+
#pragma warning(disable : 4018)
|
| 448 |
+
#endif
|
| 449 |
+
|
| 450 |
+
// The overflow checks may involve float to int conversion which may
|
| 451 |
+
// trigger precision loss warning. Re-enable the warning once the code
|
| 452 |
+
// is fixed. See T58053069.
|
| 453 |
+
C10_CLANG_DIAGNOSTIC_PUSH()
|
| 454 |
+
#if C10_CLANG_HAS_WARNING("-Wimplicit-float-conversion")
|
| 455 |
+
C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-float-conversion")
|
| 456 |
+
#endif
|
| 457 |
+
|
| 458 |
+
// bool can be converted to any type.
// Without specializing on bool, in pytorch_linux_trusty_py2_7_9_build:
// `error: comparison of constant '255' with boolean expression is always false`
// for `f > limit::max()` below
/// A bool (0 or 1) is representable in every scalar type, so it never
/// overflows.
template <typename To, typename From>
std::enable_if_t<std::is_same_v<From, bool>, bool> overflows(
    From /*f*/,
    bool strict_unsigned [[maybe_unused]] = false) {
  return false;
}
|
| 468 |
+
|
| 469 |
+
// skip isnan and isinf check for integral types
/// Returns true if the integral value `f` cannot be represented in `To`.
/// When `To` is unsigned and `From` is signed, negative values are by default
/// allowed to wrap via two's complement; pass strict_unsigned=true to treat
/// any out-of-range value as an overflow.
template <typename To, typename From>
std::enable_if_t<std::is_integral_v<From> && !std::is_same_v<From, bool>, bool>
overflows(From f, bool strict_unsigned = false) {
  using limit = std::numeric_limits<typename scalar_value_type<To>::type>;
  if constexpr (!limit::is_signed && std::numeric_limits<From>::is_signed) {
    // allow for negative numbers to wrap using two's complement arithmetic.
    // For example, with uint8, this allows for `a - b` to be treated as
    // `a + 255 * b`.
    if (!strict_unsigned) {
      // Overflows only if the value is too large positively, or so negative
      // that even its wrapped two's-complement image exceeds To's max.
      return greater_than_max<To>(f) ||
          (c10::is_negative(f) &&
           -static_cast<uint64_t>(f) > static_cast<uint64_t>(limit::max()));
    }
  }
  return c10::less_than_lowest<To>(f) || greater_than_max<To>(f);
}
|
| 486 |
+
|
| 487 |
+
/// Returns true if the floating-point value `f` cannot be represented in
/// `To`. Infinities are considered representable whenever `To` has an
/// infinity; NaN overflows only when `To` lacks a quiet NaN.
template <typename To, typename From>
std::enable_if_t<std::is_floating_point_v<From>, bool> overflows(
    From f,
    bool strict_unsigned [[maybe_unused]] = false) {
  using limit = std::numeric_limits<typename scalar_value_type<To>::type>;
  if (limit::has_infinity && std::isinf(static_cast<double>(f))) {
    return false;
  }
  // (f != f) is true exactly for NaN.
  if (!limit::has_quiet_NaN && (f != f)) {
    return true;
  }
  return f < limit::lowest() || f > limit::max();
}
|
| 500 |
+
|
| 501 |
+
C10_CLANG_DIAGNOSTIC_POP()
|
| 502 |
+
|
| 503 |
+
#ifdef _MSC_VER
|
| 504 |
+
#pragma warning(pop)
|
| 505 |
+
#endif
|
| 506 |
+
|
| 507 |
+
/// Returns true if the complex value `f` cannot be represented in `To`.
/// A complex -> real cast is considered an overflow whenever the imaginary
/// part is non-zero; otherwise each component is checked independently.
template <typename To, typename From>
std::enable_if_t<is_complex<From>::value, bool> overflows(
    From f,
    bool strict_unsigned = false) {
  // casts from complex to real are considered to overflow if the
  // imaginary component is non-zero
  if (!is_complex<To>::value && f.imag() != 0) {
    return true;
  }
  // Check for overflow componentwise
  // (Technically, the imag overflow check is guaranteed to be false
  // when !is_complex<To>, but any optimizer worth its salt will be
  // able to figure it out.)
  return overflows<
             typename scalar_value_type<To>::type,
             typename From::value_type>(f.real(), strict_unsigned) ||
      overflows<
             typename scalar_value_type<To>::type,
             typename From::value_type>(f.imag(), strict_unsigned);
}
|
| 527 |
+
|
| 528 |
+
/// Stream-inserts a Half by widening it to float (every fp16 value is exactly
/// representable in fp32, so no information is lost).
C10_API inline std::ostream& operator<<(std::ostream& out, const Half& value) {
  // static_cast replaces the original C-style cast; the Half conversion
  // operator performs the actual widening.
  out << static_cast<float>(value);
  return out;
}
|
| 532 |
+
|
| 533 |
+
} // namespace c10
|
| 534 |
+
|
| 535 |
+
#include <c10/util/Half-inl.h> // IWYU pragma: keep
|
videochat2/lib/python3.10/site-packages/torch/include/c10/util/IdWrapper.h
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <cstddef>
|
| 4 |
+
#include <functional>
|
| 5 |
+
#include <utility>
|
| 6 |
+
|
| 7 |
+
namespace c10 {
|
| 8 |
+
|
| 9 |
+
/**
|
| 10 |
+
* This template simplifies generation of simple classes that wrap an id
|
| 11 |
+
* in a typesafe way. Namely, you can use it to create a very lightweight
|
| 12 |
+
* type that only offers equality comparators and hashing. Example:
|
| 13 |
+
*
|
| 14 |
+
* struct MyIdType final : IdWrapper<MyIdType, uint32_t> {
|
| 15 |
+
* constexpr explicit MyIdType(uint32_t id): IdWrapper(id) {}
|
| 16 |
+
* };
|
| 17 |
+
*
|
| 18 |
+
* Then in the global top level namespace:
|
| 19 |
+
*
|
| 20 |
+
* C10_DEFINE_HASH_FOR_IDWRAPPER(MyIdType);
|
| 21 |
+
*
|
| 22 |
+
* That's it - equality operators and hash functions are automatically defined
|
| 23 |
+
* for you, given the underlying type supports it.
|
| 24 |
+
*/
|
| 25 |
+
template <class ConcreteType, class UnderlyingType>
class IdWrapper {
 public:
  using underlying_type = UnderlyingType;
  using concrete_type = ConcreteType;

 protected:
  // Protected: only the deriving concrete id type may construct or unwrap.
  constexpr explicit IdWrapper(underlying_type id) noexcept(
      noexcept(underlying_type(std::declval<underlying_type>())))
      : id_(id) {}

  constexpr underlying_type underlyingId() const
      noexcept(noexcept(underlying_type(std::declval<underlying_type>()))) {
    return id_;
  }

 private:
  // Hidden friend found via ADL; C10_DEFINE_HASH_FOR_IDWRAPPER relies on it
  // to implement std::hash for the concrete type.
  friend size_t hash_value(const concrete_type& v) {
    return std::hash<underlying_type>()(v.id_);
  }

  // TODO Making operator== noexcept if underlying type is noexcept equality
  // comparable doesn't work with GCC 4.8.
  // Fix this once we don't need GCC 4.8 anymore.
  friend constexpr bool operator==(
      const concrete_type& lhs,
      const concrete_type& rhs) noexcept {
    return lhs.id_ == rhs.id_;
  }

  // TODO Making operator!= noexcept if operator== is noexcept doesn't work with
  // GCC 4.8.
  // Fix this once we don't need GCC 4.8 anymore.
  friend constexpr bool operator!=(
      const concrete_type& lhs,
      const concrete_type& rhs) noexcept {
    return !(lhs == rhs);
  }

  underlying_type id_;
};
|
| 66 |
+
|
| 67 |
+
} // namespace c10
|
| 68 |
+
|
| 69 |
+
// Specializes std::hash for an IdWrapper subclass by delegating to the
// hash_value hidden friend defined inside IdWrapper. Must be invoked at
// global (top-level) namespace scope.
#define C10_DEFINE_HASH_FOR_IDWRAPPER(ClassName) \
  namespace std {                                \
  template <>                                    \
  struct hash<ClassName> {                       \
    size_t operator()(ClassName x) const {       \
      return hash_value(x);                      \
    }                                            \
  };                                             \
  }
|
videochat2/lib/python3.10/site-packages/torch/include/c10/util/Lazy.h
ADDED
|
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <atomic>
|
| 4 |
+
#include <utility>
|
| 5 |
+
|
| 6 |
+
namespace c10 {
|
| 7 |
+
|
| 8 |
+
/**
|
| 9 |
+
* Thread-safe lazy value with opportunistic concurrency: on concurrent first
|
| 10 |
+
* access, the factory may be called by multiple threads, but only one result is
|
| 11 |
+
* stored and its reference returned to all the callers.
|
| 12 |
+
*
|
| 13 |
+
* Value is heap-allocated; this optimizes for the case in which the value is
|
| 14 |
+
* never actually computed.
|
| 15 |
+
*/
|
| 16 |
+
template <class T>
class OptimisticLazy {
 public:
  OptimisticLazy() = default;
  // Deep-copies the currently stored value, if any.
  OptimisticLazy(const OptimisticLazy& other) {
    if (T* value = other.value_.load(std::memory_order_acquire)) {
      value_ = new T(*value);
    }
  }
  // Steals the other instance's value; the source is left empty.
  OptimisticLazy(OptimisticLazy&& other) noexcept
      : value_(other.value_.exchange(nullptr, std::memory_order_acq_rel)) {}
  ~OptimisticLazy() {
    reset();
  }

  // Returns the stored value, computing it via `factory` on first access.
  // Concurrent first calls may each run `factory`, but exactly one result is
  // published; the others are destroyed and every caller gets a reference to
  // the winning object.
  template <class Factory>
  T& ensure(Factory&& factory) {
    if (T* value = value_.load(std::memory_order_acquire)) {
      return *value;
    }
    T* value = new T(factory());
    T* old = nullptr;
    // If another thread published first, keep its value and discard ours.
    if (!value_.compare_exchange_strong(
            old, value, std::memory_order_release, std::memory_order_acquire)) {
      delete value;
      value = old;
    }
    return *value;
  }

  // The following methods are not thread-safe: they should not be called
  // concurrently with any other method.

  OptimisticLazy& operator=(const OptimisticLazy& other) {
    // Copy-and-move: builds a temporary copy, then move-assigns it in.
    *this = OptimisticLazy{other};
    return *this;
  }

  OptimisticLazy& operator=(OptimisticLazy&& other) noexcept {
    if (this != &other) {
      reset();
      value_.store(
          other.value_.exchange(nullptr, std::memory_order_acquire),
          std::memory_order_release);
    }
    return *this;
  }

  // Destroys the stored value, if any (not thread-safe; see note above).
  void reset() {
    if (T* old = value_.load(std::memory_order_relaxed)) {
      value_.store(nullptr, std::memory_order_relaxed);
      delete old;
    }
  }

 private:
  // Null until the value has been computed; owned by this object.
  std::atomic<T*> value_{nullptr};
};
|
| 74 |
+
|
| 75 |
+
/**
|
| 76 |
+
* Interface for a value that is computed on first access.
|
| 77 |
+
*/
|
| 78 |
+
template <class T>
class LazyValue {
 public:
  virtual ~LazyValue() = default;

  // Returns the (possibly lazily computed) value.
  virtual const T& get() const = 0;
};
|
| 85 |
+
|
| 86 |
+
/**
|
| 87 |
+
* Convenience thread-safe LazyValue implementation with opportunistic
|
| 88 |
+
* concurrency.
|
| 89 |
+
*/
|
| 90 |
+
template <class T>
class OptimisticLazyValue : public LazyValue<T> {
 public:
  const T& get() const override {
    // Caching and synchronization are delegated to OptimisticLazy; compute()
    // may run more than once under contention, but only one result is kept.
    return value_.ensure([this] { return compute(); });
  }

 private:
  // Subclasses implement the actual (possibly expensive) computation.
  virtual T compute() const = 0;

  mutable OptimisticLazy<T> value_;
};
|
| 102 |
+
|
| 103 |
+
/**
|
| 104 |
+
* Convenience immutable (thus thread-safe) LazyValue implementation for cases
|
| 105 |
+
* in which the value is not actually lazy.
|
| 106 |
+
*/
|
| 107 |
+
template <class T>
class PrecomputedLazyValue : public LazyValue<T> {
 public:
  // NOTE(review): single-argument ctor is intentionally not explicit —
  // presumably to allow implicit conversion from T at call sites; confirm
  // before tightening.
  PrecomputedLazyValue(T value) : value_(std::move(value)) {}

  const T& get() const override {
    return value_;
  }

 private:
  T value_;
};
|
| 119 |
+
|
| 120 |
+
} // namespace c10
|
videochat2/lib/python3.10/site-packages/torch/include/c10/util/LeftRight.h
ADDED
|
@@ -0,0 +1,223 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include <c10/macros/Macros.h>
|
| 2 |
+
#include <c10/util/Synchronized.h>
|
| 3 |
+
#include <array>
|
| 4 |
+
#include <atomic>
|
| 5 |
+
#include <mutex>
|
| 6 |
+
#include <thread>
|
| 7 |
+
|
| 8 |
+
namespace c10 {
|
| 9 |
+
|
| 10 |
+
namespace detail {
|
| 11 |
+
|
| 12 |
+
// RAII guard that increments a reader counter on construction and decrements
// it on destruction; LeftRight uses it below to track in-flight readers.
struct IncrementRAII final {
 public:
  explicit IncrementRAII(std::atomic<int32_t>* counter) : _counter(counter) {
    _counter->fetch_add(1);
  }

  ~IncrementRAII() {
    _counter->fetch_sub(1);
  }

 private:
  std::atomic<int32_t>* _counter; // not owned

  C10_DISABLE_COPY_AND_ASSIGN(IncrementRAII);
};
|
| 27 |
+
|
| 28 |
+
} // namespace detail
|
| 29 |
+
|
| 30 |
+
// LeftRight wait-free readers synchronization primitive
|
| 31 |
+
// https://hal.archives-ouvertes.fr/hal-01207881/document
|
| 32 |
+
//
|
| 33 |
+
// LeftRight is quite easy to use (it can make an arbitrary
|
| 34 |
+
// data structure permit wait-free reads), but it has some
|
| 35 |
+
// particular performance characteristics you should be aware
|
| 36 |
+
// of if you're deciding to use it:
|
| 37 |
+
//
|
| 38 |
+
// - Reads still incur an atomic write (this is how LeftRight
|
| 39 |
+
// keeps track of how long it needs to keep around the old
|
| 40 |
+
// data structure)
|
| 41 |
+
//
|
| 42 |
+
// - Writes get executed twice, to keep both the left and right
|
| 43 |
+
// versions up to date. So if your write is expensive or
|
| 44 |
+
// nondeterministic, this is also an inappropriate structure
|
| 45 |
+
//
|
| 46 |
+
// LeftRight is used fairly rarely in PyTorch's codebase. If you
|
| 47 |
+
// are still not sure if you need it or not, consult your local
|
| 48 |
+
// C++ expert.
|
| 49 |
+
//
|
| 50 |
+
template <class T>
class LeftRight final {
 public:
  // Both instances are constructed from the same arguments so the two copies
  // start out identical (the class invariant between writes).
  template <class... Args>
  explicit LeftRight(const Args&... args)
      : _counters{{{0}, {0}}},
        _foregroundCounterIndex(0),
        _foregroundDataIndex(0),
        _data{{T{args...}, T{args...}}},
        _writeMutex() {}

  // Copying and moving would not be threadsafe.
  // Needs more thought and careful design to make that work.
  LeftRight(const LeftRight&) = delete;
  LeftRight(LeftRight&&) noexcept = delete;
  LeftRight& operator=(const LeftRight&) = delete;
  LeftRight& operator=(LeftRight&&) noexcept = delete;

  ~LeftRight() {
    // wait until any potentially running writers are finished
    { std::unique_lock<std::mutex> lock(_writeMutex); }

    // wait until any potentially running readers are finished
    while (_counters[0].load() != 0 || _counters[1].load() != 0) {
      std::this_thread::yield();
    }
  }

  // Wait-free read: registers on the current foreground counter (so writers
  // know to wait for us) and runs readFunc on the foreground instance.
  template <typename F>
  auto read(F&& readFunc) const {
    detail::IncrementRAII _increment_counter(
        &_counters[_foregroundCounterIndex.load()]);

    return std::forward<F>(readFunc)(_data[_foregroundDataIndex.load()]);
  }

  // Throwing an exception in writeFunc is ok but causes the state to be either
  // the old or the new state, depending on if the first or the second call to
  // writeFunc threw.
  template <typename F>
  auto write(F&& writeFunc) {
    std::unique_lock<std::mutex> lock(_writeMutex);

    return _write(std::forward<F>(writeFunc));
  }

 private:
  // Core write algorithm; caller must hold _writeMutex.
  template <class F>
  auto _write(const F& writeFunc) {
    /*
     * Assume, A is in background and B in foreground. In simplified terms, we
     * want to do the following:
     * 1. Write to A (old background)
     * 2. Switch A/B
     * 3. Write to B (new background)
     *
     * More detailed algorithm (explanations on why this is important are below
     * in code):
     * 1. Write to A
     * 2. Switch A/B data pointers
     * 3. Wait until A counter is zero
     * 4. Switch A/B counters
     * 5. Wait until B counter is zero
     * 6. Write to B
     */

    auto localDataIndex = _foregroundDataIndex.load();

    // 1. Write to A
    _callWriteFuncOnBackgroundInstance(writeFunc, localDataIndex);

    // 2. Switch A/B data pointers
    localDataIndex = localDataIndex ^ 1;
    _foregroundDataIndex = localDataIndex;

    /*
     * 3. Wait until A counter is zero
     *
     * In the previous write run, A was foreground and B was background.
     * There was a time after switching _foregroundDataIndex (B to foreground)
     * and before switching _foregroundCounterIndex, in which new readers could
     * have read B but incremented A's counter.
     *
     * In this current run, we just switched _foregroundDataIndex (A back to
     * foreground), but before writing to the new background B, we have to make
     * sure A's counter was zero briefly, so all these old readers are gone.
     */
    auto localCounterIndex = _foregroundCounterIndex.load();
    _waitForBackgroundCounterToBeZero(localCounterIndex);

    /*
     * 4. Switch A/B counters
     *
     * Now that we know all readers on B are really gone, we can switch the
     * counters and have new readers increment A's counter again, which is the
     * correct counter since they're reading A.
     */
    localCounterIndex = localCounterIndex ^ 1;
    _foregroundCounterIndex = localCounterIndex;

    /*
     * 5. Wait until B counter is zero
     *
     * This waits for all the readers on B that came in while both data and
     * counter for B was in foreground, i.e. normal readers that happened
     * outside of that brief gap between switching data and counter.
     */
    _waitForBackgroundCounterToBeZero(localCounterIndex);

    // 6. Write to B
    return _callWriteFuncOnBackgroundInstance(writeFunc, localDataIndex);
  }

  // Runs writeFunc on the background copy. On exception, restores the
  // "both copies equal" invariant by copying foreground over background,
  // then rethrows.
  template <class F>
  auto _callWriteFuncOnBackgroundInstance(
      const F& writeFunc,
      uint8_t localDataIndex) {
    try {
      return writeFunc(_data[localDataIndex ^ 1]);
    } catch (...) {
      // recover invariant by copying from the foreground instance
      _data[localDataIndex ^ 1] = _data[localDataIndex];
      // rethrow
      throw;
    }
  }

  // Spin (yielding) until no reader is registered on the background counter.
  void _waitForBackgroundCounterToBeZero(uint8_t counterIndex) {
    while (_counters[counterIndex ^ 1].load() != 0) {
      std::this_thread::yield();
    }
  }

  // mutable: read() is const but still registers on a counter.
  mutable std::array<std::atomic<int32_t>, 2> _counters;
  std::atomic<uint8_t> _foregroundCounterIndex;
  std::atomic<uint8_t> _foregroundDataIndex;
  std::array<T, 2> _data;
  std::mutex _writeMutex;
};
|
| 189 |
+
|
| 190 |
+
// RWSafeLeftRightWrapper is API compatible with LeftRight and uses a
// read-write lock to protect T (data).
template <class T>
class RWSafeLeftRightWrapper final {
 public:
  template <class... Args>
  explicit RWSafeLeftRightWrapper(const Args&... args) : data_{args...} {}

  // RWSafeLeftRightWrapper is not copyable or moveable since LeftRight
  // is not copyable or moveable.
  RWSafeLeftRightWrapper(const RWSafeLeftRightWrapper&) = delete;
  RWSafeLeftRightWrapper(RWSafeLeftRightWrapper&&) noexcept = delete;
  RWSafeLeftRightWrapper& operator=(const RWSafeLeftRightWrapper&) = delete;
  RWSafeLeftRightWrapper& operator=(RWSafeLeftRightWrapper&&) noexcept = delete;

  // Runs readFunc under the lock with const access to the data.
  template <typename F>
  // NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward)
  auto read(F&& readFunc) const {
    return data_.withLock(
        [&readFunc](T const& data) { return std::forward<F>(readFunc)(data); });
  }

  // Runs writeFunc under the lock with mutable access to the data.
  template <typename F>
  // NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward)
  auto write(F&& writeFunc) {
    return data_.withLock(
        [&writeFunc](T& data) { return std::forward<F>(writeFunc)(data); });
  }

 private:
  c10::Synchronized<T> data_;
};
|
| 222 |
+
|
| 223 |
+
} // namespace c10
|
videochat2/lib/python3.10/site-packages/torch/include/c10/util/Logging.h
ADDED
|
@@ -0,0 +1,370 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#ifndef C10_UTIL_LOGGING_H_
|
| 2 |
+
#define C10_UTIL_LOGGING_H_
|
| 3 |
+
|
| 4 |
+
#include <climits>
|
| 5 |
+
#include <exception>
|
| 6 |
+
#include <functional>
|
| 7 |
+
#include <limits>
|
| 8 |
+
#include <sstream>
|
| 9 |
+
|
| 10 |
+
#include <c10/macros/Macros.h>
|
| 11 |
+
#include <c10/util/Backtrace.h>
|
| 12 |
+
#include <c10/util/Exception.h>
|
| 13 |
+
#include <c10/util/Flags.h>
|
| 14 |
+
#include <c10/util/StringUtil.h>
|
| 15 |
+
|
| 16 |
+
// CAFFE2_LOG_THRESHOLD is a compile time flag that would allow us to turn off
|
| 17 |
+
// logging at compile time so no logging message below that level is produced
|
| 18 |
+
// at all. The value should be between INT_MIN and CAFFE_FATAL.
|
| 19 |
+
#ifndef CAFFE2_LOG_THRESHOLD
|
| 20 |
+
// If we have not defined the compile time log threshold, we keep all the
|
| 21 |
+
// log cases.
|
| 22 |
+
#define CAFFE2_LOG_THRESHOLD INT_MIN
|
| 23 |
+
#endif // CAFFE2_LOG_THRESHOLD
|
| 24 |
+
|
| 25 |
+
// Below are different implementations for glog and non-glog cases.
|
| 26 |
+
#ifdef C10_USE_GLOG
|
| 27 |
+
#include <c10/util/logging_is_google_glog.h>
|
| 28 |
+
#else // !C10_USE_GLOG
|
| 29 |
+
#include <c10/util/logging_is_not_google_glog.h>
|
| 30 |
+
#endif // C10_USE_GLOG
|
| 31 |
+
|
| 32 |
+
C10_DECLARE_int(caffe2_log_level);
|
| 33 |
+
C10_DECLARE_bool(caffe2_use_fatal_for_enforce);
|
| 34 |
+
|
| 35 |
+
// Some versions of GLOG support less-spammy version of LOG_EVERY_MS. If it's
|
| 36 |
+
// not available - just short-circuit to the always working one.
|
| 37 |
+
// We define the C10_ name to avoid confusing other files
|
| 38 |
+
#ifdef LOG_EVERY_MS
|
| 39 |
+
#define C10_LOG_EVERY_MS(severity, ms) LOG_EVERY_MS(severity, ms)
|
| 40 |
+
#else
|
| 41 |
+
#define C10_LOG_EVERY_MS(severity, ms) LOG(severity)
|
| 42 |
+
#endif
|
| 43 |
+
|
| 44 |
+
// Same for LOG_FIRST_N
|
| 45 |
+
#ifdef LOG_FIRST_N
|
| 46 |
+
#define C10_LOG_FIRST_N(severity, n) LOG_FIRST_N(severity, n)
|
| 47 |
+
#else
|
| 48 |
+
#define C10_LOG_FIRST_N(severity, n) LOG(severity)
|
| 49 |
+
#endif
|
| 50 |
+
|
| 51 |
+
// Same for LOG_EVERY_N
|
| 52 |
+
#ifdef LOG_EVERY_N
|
| 53 |
+
#define C10_LOG_EVERY_N(severity, n) LOG_EVERY_N(severity, n)
|
| 54 |
+
#else
|
| 55 |
+
#define C10_LOG_EVERY_N(severity, n) LOG(severity)
|
| 56 |
+
#endif
|
| 57 |
+
|
| 58 |
+
namespace c10 {
|
| 59 |
+
|
| 60 |
+
using std::string;
|
| 61 |
+
|
| 62 |
+
// Functions that we use for initialization.
|
| 63 |
+
C10_API bool InitCaffeLogging(int* argc, char** argv);
|
| 64 |
+
C10_API void UpdateLoggingLevelsFromFlags();
|
| 65 |
+
|
| 66 |
+
[[noreturn]] C10_API void ThrowEnforceNotMet(
|
| 67 |
+
const char* file,
|
| 68 |
+
const int line,
|
| 69 |
+
const char* condition,
|
| 70 |
+
const std::string& msg,
|
| 71 |
+
const void* caller = nullptr);
|
| 72 |
+
|
| 73 |
+
[[noreturn]] C10_API void ThrowEnforceNotMet(
|
| 74 |
+
const char* file,
|
| 75 |
+
const int line,
|
| 76 |
+
const char* condition,
|
| 77 |
+
const char* msg,
|
| 78 |
+
const void* caller = nullptr);
|
| 79 |
+
|
| 80 |
+
// Overload selected when CAFFE_ENFORCE is invoked with no message arguments:
// c10::str(...) of zero args yields detail::CompileTimeEmptyString, which
// dispatches here and forwards an empty C string to the out-of-line
// implementation, avoiding a std::string construction at the call site.
[[noreturn]] C10_API inline void ThrowEnforceNotMet(
    const char* file,
    const int line,
    const char* condition,
    detail::CompileTimeEmptyString /*msg*/,
    const void* caller = nullptr) {
  ThrowEnforceNotMet(file, line, condition, "", caller);
}
|
| 88 |
+
|
| 89 |
+
[[noreturn]] C10_API void ThrowEnforceFiniteNotMet(
|
| 90 |
+
const char* file,
|
| 91 |
+
const int line,
|
| 92 |
+
const char* condition,
|
| 93 |
+
const std::string& msg,
|
| 94 |
+
const void* caller = nullptr);
|
| 95 |
+
|
| 96 |
+
[[noreturn]] C10_API void ThrowEnforceFiniteNotMet(
|
| 97 |
+
const char* file,
|
| 98 |
+
const int line,
|
| 99 |
+
const char* condition,
|
| 100 |
+
const char* msg,
|
| 101 |
+
const void* caller = nullptr);
|
| 102 |
+
|
| 103 |
+
// Overload selected when CAFFE_ENFORCE_FINITE is invoked with no message
// arguments (the zero-arg c10::str(...) yields CompileTimeEmptyString);
// forwards an empty C string to the out-of-line implementation.
[[noreturn]] C10_API inline void ThrowEnforceFiniteNotMet(
    const char* file,
    const int line,
    const char* condition,
    detail::CompileTimeEmptyString /*msg*/,
    const void* caller = nullptr) {
  ThrowEnforceFiniteNotMet(file, line, condition, "", caller);
}
|
| 111 |
+
|
| 112 |
+
// Reports, at compile time, whether c10 logging is backed by Google glog
// (build defined C10_USE_GLOG) or by the built-in non-glog implementation.
constexpr bool IsUsingGoogleLogging() {
#ifdef C10_USE_GLOG
  return true;
#else
  return false;
#endif
}
|
| 119 |
+
|
| 120 |
+
/**
|
| 121 |
+
* A utility to allow one to show log info to stderr after the program starts.
|
| 122 |
+
*
|
| 123 |
+
* This is similar to calling GLOG's --logtostderr, or setting caffe2_log_level
|
| 124 |
+
* to smaller than INFO. You are recommended to only use this in a few sparse
|
| 125 |
+
* cases, such as when you want to write a tutorial or something. Normally, use
|
| 126 |
+
* the commandline flags to set the log level.
|
| 127 |
+
*/
|
| 128 |
+
C10_API void ShowLogInfoToStderr();
|
| 129 |
+
|
| 130 |
+
C10_API void SetStackTraceFetcher(std::function<::c10::Backtrace()> fetcher);
|
| 131 |
+
|
| 132 |
+
/**
|
| 133 |
+
* Convenience function for non-lazy stack trace fetchers. The Backtrace
|
| 134 |
+
* overload should be preferred when stringifying the backtrace is expensive.
|
| 135 |
+
*/
|
| 136 |
+
C10_API void SetStackTraceFetcher(std::function<std::string()> fetcher);
|
| 137 |
+
|
| 138 |
+
using EnforceNotMet = ::c10::Error;
|
| 139 |
+
|
| 140 |
+
#define CAFFE_ENFORCE(condition, ...) \
|
| 141 |
+
do { \
|
| 142 |
+
if (C10_UNLIKELY(!(condition))) { \
|
| 143 |
+
::c10::ThrowEnforceNotMet( \
|
| 144 |
+
__FILE__, __LINE__, #condition, ::c10::str(__VA_ARGS__)); \
|
| 145 |
+
} \
|
| 146 |
+
} while (false)
|
| 147 |
+
|
| 148 |
+
#define CAFFE_ENFORCE_FINITE(condition, ...) \
|
| 149 |
+
do { \
|
| 150 |
+
if (C10_UNLIKELY(!(condition))) { \
|
| 151 |
+
::c10::ThrowEnforceFiniteNotMet( \
|
| 152 |
+
__FILE__, __LINE__, #condition, ::c10::str(__VA_ARGS__)); \
|
| 153 |
+
} \
|
| 154 |
+
} while (false)
|
| 155 |
+
|
| 156 |
+
#define CAFFE_ENFORCE_WITH_CALLER(condition, ...) \
|
| 157 |
+
do { \
|
| 158 |
+
if (C10_UNLIKELY(!(condition))) { \
|
| 159 |
+
::c10::ThrowEnforceNotMet( \
|
| 160 |
+
__FILE__, __LINE__, #condition, ::c10::str(__VA_ARGS__), this); \
|
| 161 |
+
} \
|
| 162 |
+
} while (false)
|
| 163 |
+
|
| 164 |
+
#define CAFFE_THROW(...) \
|
| 165 |
+
::c10::ThrowEnforceNotMet(__FILE__, __LINE__, "", ::c10::str(__VA_ARGS__))
|
| 166 |
+
|
| 167 |
+
/**
|
| 168 |
+
* Rich logging messages
|
| 169 |
+
*
|
| 170 |
+
* CAFFE_ENFORCE_THAT can be used with one of the "checker functions" that
|
| 171 |
+
* capture input argument values and add it to the exception message. E.g.
|
| 172 |
+
* `CAFFE_ENFORCE_THAT(Equals(foo(x), bar(y)), "Optional additional message")`
|
| 173 |
+
* would evaluate both foo and bar only once and if the results are not equal -
|
| 174 |
+
* include them in the exception message.
|
| 175 |
+
*
|
| 176 |
+
* Some of the basic checker functions like Equals or Greater are already
|
| 177 |
+
* defined below. Other header might define customized checkers by adding
|
| 178 |
+
* functions to caffe2::enforce_detail namespace. For example:
|
| 179 |
+
*
|
| 180 |
+
* namespace caffe2 { namespace enforce_detail {
|
| 181 |
+
* inline EnforceFailMessage IsVector(const vector<int64_t>& shape) {
|
| 182 |
+
* if (shape.size() == 1) { return EnforceOK(); }
|
| 183 |
+
* return c10::str("Shape ", shape, " is not a vector");
|
| 184 |
+
* }
|
| 185 |
+
* }}
|
| 186 |
+
*
|
| 187 |
+
* With further usages like `CAFFE_ENFORCE_THAT(IsVector(Input(0).dims()))`
|
| 188 |
+
*
|
| 189 |
+
* Convenient wrappers for binary operations like CAFFE_ENFORCE_EQ are provided
|
| 190 |
+
* too. Please use them instead of TORCH_CHECK_EQ and friends for failures in
|
| 191 |
+
* user-provided input.
|
| 192 |
+
*/
|
| 193 |
+
|
| 194 |
+
namespace enforce_detail {
|
| 195 |
+
|
| 196 |
+
template <typename T1, typename T2>
|
| 197 |
+
std::string enforceFailMsgImpl(const T1& x, const T2& y) {
|
| 198 |
+
return c10::str(x, " vs ", y);
|
| 199 |
+
}
|
| 200 |
+
|
| 201 |
+
template <typename T1, typename T2, typename... Args>
|
| 202 |
+
std::string enforceFailMsgImpl(const T1& x, const T2& y, const Args&... args) {
|
| 203 |
+
return c10::str(x, " vs ", y, ". ", args...);
|
| 204 |
+
}
|
| 205 |
+
|
| 206 |
+
template <typename Pred, typename T1, typename T2, typename GetFailMsgFunc>
|
| 207 |
+
void enforceThatImpl(
|
| 208 |
+
Pred p,
|
| 209 |
+
const T1& lhs,
|
| 210 |
+
const T2& rhs,
|
| 211 |
+
const char* file,
|
| 212 |
+
int line,
|
| 213 |
+
const char* expr,
|
| 214 |
+
const void* caller,
|
| 215 |
+
GetFailMsgFunc getFailMsg) {
|
| 216 |
+
if (C10_UNLIKELY(!(p(lhs, rhs)))) {
|
| 217 |
+
::c10::ThrowEnforceNotMet(file, line, expr, getFailMsg(lhs, rhs), caller);
|
| 218 |
+
}
|
| 219 |
+
}
|
| 220 |
+
|
| 221 |
+
#define CAFFE_ENFORCE_THAT_IMPL(op, lhs, rhs, expr, ...) \
|
| 222 |
+
::c10::enforce_detail::enforceThatImpl( \
|
| 223 |
+
op, \
|
| 224 |
+
(lhs), \
|
| 225 |
+
(rhs), \
|
| 226 |
+
__FILE__, \
|
| 227 |
+
__LINE__, \
|
| 228 |
+
expr, \
|
| 229 |
+
nullptr, \
|
| 230 |
+
[&](const auto& arg1, const auto& arg2) { \
|
| 231 |
+
return ::c10::enforce_detail::enforceFailMsgImpl( \
|
| 232 |
+
arg1, arg2, ##__VA_ARGS__); \
|
| 233 |
+
})
|
| 234 |
+
|
| 235 |
+
#define CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER(op, lhs, rhs, expr, ...) \
|
| 236 |
+
::c10::enforce_detail::enforceThatImpl( \
|
| 237 |
+
op, \
|
| 238 |
+
(lhs), \
|
| 239 |
+
(rhs), \
|
| 240 |
+
__FILE__, \
|
| 241 |
+
__LINE__, \
|
| 242 |
+
expr, \
|
| 243 |
+
this, \
|
| 244 |
+
[&](const auto& arg1, const auto& arg2) { \
|
| 245 |
+
return ::c10::enforce_detail::enforceFailMsgImpl( \
|
| 246 |
+
arg1, arg2, ##__VA_ARGS__); \
|
| 247 |
+
})
|
| 248 |
+
|
| 249 |
+
} // namespace enforce_detail
|
| 250 |
+
|
| 251 |
+
#define CAFFE_ENFORCE_THAT(cmp, op, lhs, rhs, ...) \
|
| 252 |
+
CAFFE_ENFORCE_THAT_IMPL(cmp, lhs, rhs, #lhs " " #op " " #rhs, ##__VA_ARGS__)
|
| 253 |
+
|
| 254 |
+
#define CAFFE_ENFORCE_BINARY_OP(cmp, op, x, y, ...) \
|
| 255 |
+
CAFFE_ENFORCE_THAT_IMPL(cmp, x, y, #x " " #op " " #y, ##__VA_ARGS__)
|
| 256 |
+
#define CAFFE_ENFORCE_EQ(x, y, ...) \
|
| 257 |
+
CAFFE_ENFORCE_BINARY_OP(std::equal_to<void>(), ==, x, y, ##__VA_ARGS__)
|
| 258 |
+
#define CAFFE_ENFORCE_NE(x, y, ...) \
|
| 259 |
+
CAFFE_ENFORCE_BINARY_OP(std::not_equal_to<void>(), !=, x, y, ##__VA_ARGS__)
|
| 260 |
+
#define CAFFE_ENFORCE_LE(x, y, ...) \
|
| 261 |
+
CAFFE_ENFORCE_BINARY_OP(std::less_equal<void>(), <=, x, y, ##__VA_ARGS__)
|
| 262 |
+
#define CAFFE_ENFORCE_LT(x, y, ...) \
|
| 263 |
+
CAFFE_ENFORCE_BINARY_OP(std::less<void>(), <, x, y, ##__VA_ARGS__)
|
| 264 |
+
#define CAFFE_ENFORCE_GE(x, y, ...) \
|
| 265 |
+
CAFFE_ENFORCE_BINARY_OP(std::greater_equal<void>(), >=, x, y, ##__VA_ARGS__)
|
| 266 |
+
#define CAFFE_ENFORCE_GT(x, y, ...) \
|
| 267 |
+
CAFFE_ENFORCE_BINARY_OP(std::greater<void>(), >, x, y, ##__VA_ARGS__)
|
| 268 |
+
|
| 269 |
+
#define CAFFE_ENFORCE_BINARY_OP_WITH_CALLER(cmp, op, x, y, ...) \
|
| 270 |
+
CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER( \
|
| 271 |
+
cmp, x, y, #x " " #op " " #y, ##__VA_ARGS__)
|
| 272 |
+
#define CAFFE_ENFORCE_EQ_WITH_CALLER(x, y, ...) \
|
| 273 |
+
CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( \
|
| 274 |
+
std::equal_to<void>(), ==, x, y, ##__VA_ARGS__)
|
| 275 |
+
#define CAFFE_ENFORCE_NE_WITH_CALLER(x, y, ...) \
|
| 276 |
+
CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( \
|
| 277 |
+
std::not_equal_to<void>(), !=, x, y, ##__VA_ARGS__)
|
| 278 |
+
#define CAFFE_ENFORCE_LE_WITH_CALLER(x, y, ...) \
|
| 279 |
+
CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( \
|
| 280 |
+
std::less_equal<void>(), <=, x, y, ##__VA_ARGS__)
|
| 281 |
+
#define CAFFE_ENFORCE_LT_WITH_CALLER(x, y, ...) \
|
| 282 |
+
CAFFE_ENFORCE_BINARY_OP_WITH_CALLER(std::less<void>(), <, x, y, ##__VA_ARGS__)
|
| 283 |
+
#define CAFFE_ENFORCE_GE_WITH_CALLER(x, y, ...) \
|
| 284 |
+
CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( \
|
| 285 |
+
std::greater_equal<void>(), >=, x, y, ##__VA_ARGS__)
|
| 286 |
+
#define CAFFE_ENFORCE_GT_WITH_CALLER(x, y, ...) \
|
| 287 |
+
CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( \
|
| 288 |
+
std::greater<void>(), >, x, y, ##__VA_ARGS__)
|
| 289 |
+
|
| 290 |
+
struct IValue;
|
| 291 |
+
// Interface for handlers that receive sampled logging events emitted via the
// C10_LOG_EVENT_SAMPLED macro. Implementations are installed per event name
// with InitEventSampledHandlers (which must run before other threads spawn)
// and looked up with GetEventSampledHandler.
class C10_API EventSampledHandler {
 public:
  // Logs one occurrence of the event. `model_id` identifies the model the
  // event pertains to; `args` carries the event-specific payload values.
  virtual void log(
      std::string_view model_id,
      const std::vector<c10::IValue>& args) = 0;
  virtual ~EventSampledHandler() = default;
};
|
| 298 |
+
|
| 299 |
+
#define C10_LOG_EVENT_SAMPLED(event, ...) \
|
| 300 |
+
static const std::unique_ptr<::c10::EventSampledHandler>& \
|
| 301 |
+
_##event##EventSampledHandler = ::c10::GetEventSampledHandler(#event); \
|
| 302 |
+
if (_##event##EventSampledHandler) { \
|
| 303 |
+
_##event##EventSampledHandler->log(__VA_ARGS__); \
|
| 304 |
+
}
|
| 305 |
+
|
| 306 |
+
// Must be called in the main thread before any other threads are spawned.
|
| 307 |
+
C10_API void InitEventSampledHandlers(
|
| 308 |
+
std::vector<
|
| 309 |
+
std::pair<std::string_view, std::unique_ptr<EventSampledHandler>>>);
|
| 310 |
+
C10_API const std::unique_ptr<EventSampledHandler>& GetEventSampledHandler(
|
| 311 |
+
std::string_view);
|
| 312 |
+
|
| 313 |
+
/**
|
| 314 |
+
* Very lightweight logging for the first time API usage. It's beneficial for
|
| 315 |
+
* tracking of individual functionality usage in larger applications.
|
| 316 |
+
*
|
| 317 |
+
* In order to ensure light-weightedness of logging, we utilize static variable
|
| 318 |
+
* trick - LogAPIUsage will be invoked only once and further invocations will
|
| 319 |
+
* just do an atomic check.
|
| 320 |
+
*
|
| 321 |
+
* Example:
|
| 322 |
+
* // Logs caller info with an arbitrary text event, if there is a usage.
|
| 323 |
+
* C10_LOG_API_USAGE_ONCE("my_api");
|
| 324 |
+
*/
|
| 325 |
+
#define C10_LOG_API_USAGE_ONCE(...) \
|
| 326 |
+
C10_UNUSED static bool C10_ANONYMOUS_VARIABLE(logFlag) = \
|
| 327 |
+
::c10::detail::LogAPIUsageFakeReturn(__VA_ARGS__);
|
| 328 |
+
|
| 329 |
+
// API usage logging capabilities
|
| 330 |
+
C10_API void SetAPIUsageLogger(std::function<void(const std::string&)> logger);
|
| 331 |
+
C10_API void LogAPIUsage(const std::string& context);
|
| 332 |
+
|
| 333 |
+
C10_API void SetAPIUsageMetadataLogger(
|
| 334 |
+
std::function<void(
|
| 335 |
+
const std::string&,
|
| 336 |
+
const std::map<std::string, std::string>& metadata_map)> logger);
|
| 337 |
+
C10_API void LogAPIUsageMetadata(
|
| 338 |
+
const std::string& context,
|
| 339 |
+
const std::map<std::string, std::string>& metadata_map);
|
| 340 |
+
|
| 341 |
+
// PyTorch ddp usage logging capabilities
|
| 342 |
+
// DDPLoggingData holds data that can be logged in applications
|
| 343 |
+
// for analysis and debugging. Data structure is defined in
|
| 344 |
+
// c10 directory so that it can be easily imported by both c10
|
| 345 |
+
// and torch files.
|
| 346 |
+
// Plain-data container of DDP logging fields, grouped by value type and keyed
// by field name. Consumed by the logger installed via SetPyTorchDDPUsageLogger.
struct DDPLoggingData {
  // logging fields that are string types.
  std::map<std::string, std::string> strs_map;
  // logging fields that are int64_t types.
  std::map<std::string, int64_t> ints_map;
};
|
| 352 |
+
|
| 353 |
+
C10_API void SetPyTorchDDPUsageLogger(
|
| 354 |
+
std::function<void(const DDPLoggingData&)> logger);
|
| 355 |
+
C10_API void LogPyTorchDDPUsage(const DDPLoggingData& ddpData);
|
| 356 |
+
|
| 357 |
+
namespace detail {
|
| 358 |
+
// Return value is needed to do the static variable initialization trick
|
| 359 |
+
C10_API bool LogAPIUsageFakeReturn(const std::string& context);
|
| 360 |
+
} // namespace detail
|
| 361 |
+
|
| 362 |
+
// Initializes the c10 logger.
|
| 363 |
+
C10_API void initLogging();
|
| 364 |
+
|
| 365 |
+
// Sets the rank, which will be included in log messages
|
| 366 |
+
C10_API void SetGlobalRank(int64_t rank);
|
| 367 |
+
|
| 368 |
+
} // namespace c10
|
| 369 |
+
|
| 370 |
+
#endif // C10_UTIL_LOGGING_H_
|
videochat2/lib/python3.10/site-packages/torch/include/c10/util/NetworkFlow.h
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/macros/Macros.h>
|
| 4 |
+
|
| 5 |
+
#include <string>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
/**
|
| 9 |
+
* This file provides a network flow implementation.
|
| 10 |
+
* https://en.wikipedia.org/wiki/Flow_network
|
| 11 |
+
*
|
| 12 |
+
* It aims to mirror some of the behavior of networkx, which is/was used by
|
| 13 |
+
* functorch partitioners for splitting the graph into a forward and backward
|
| 14 |
+
* graph.
|
| 15 |
+
*/
|
| 16 |
+
|
| 17 |
+
namespace c10 {
|
| 18 |
+
|
| 19 |
+
// Outcome codes for NetworkFlowGraph::minimum_cut.
enum class C10_API_ENUM MinCutStatus {
  SUCCESS = 0,
  UNBOUNDED = 1,
  OVERFLOW_INF = 2,
  INVALID = 3,
};
|
| 25 |
+
|
| 26 |
+
// Result of NetworkFlowGraph::minimum_cut.
struct MinCutResult {
  MinCutStatus status;
  // Value of the maximum flow; meaningful only when status == SUCCESS.
  int64_t max_flow;
  // Partition of node names induced by the cut — presumably the nodes
  // reachable from the source in the residual graph vs. the remainder
  // (networkx convention); confirm against the .cpp implementation.
  std::vector<std::string> reachable;
  std::vector<std::string> unreachable;
};
|
| 32 |
+
|
| 33 |
+
// Modeled after networkx implementation
|
| 34 |
+
// Flow network over string-named vertices with minimum-cut support.
// Modeled after networkx implementation
class C10_API NetworkFlowGraph {
 public:
  // Sentinel for an effectively unbounded edge capacity.
  // selected such that INF + INF is < INT64_MAX
  constexpr static int64_t INF = (1LL << 62) - 1;

  // A directed edge from `source` to `dest` with the given capacity.
  struct Edge {
    std::string source, dest;
    int64_t capacity;
  };

  // Appends a directed edge to the graph; capacity defaults to 1.
  MinCutStatus add_edge(
      const std::string& source,
      const std::string& dest,
      int64_t capacity = 1);

  // Computes a minimum s-t cut (equivalently, maximum flow) between the
  // vertices named `s` and `t`; see MinCutResult for the output fields.
  MinCutResult minimum_cut(const std::string& s, const std::string& t) const;

  // Edge list in insertion order; public so callers can inspect the graph.
  std::vector<Edge> edges;
};
|
| 53 |
+
|
| 54 |
+
} // namespace c10
|
videochat2/lib/python3.10/site-packages/torch/include/c10/util/ParallelGuard.h
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/macros/Macros.h>
|
| 4 |
+
|
| 5 |
+
namespace c10 {
|
| 6 |
+
|
| 7 |
+
// RAII thread local guard that tracks whether code is being executed in
|
| 8 |
+
// `at::parallel_for` or `at::parallel_reduce` loop function.
|
| 9 |
+
class C10_API ParallelGuard {
 public:
  // Returns the current thread's flag value — true while executing inside a
  // parallel loop body guarded by this class.
  static bool is_enabled();

  // Sets the thread-local flag to `state`, remembering the previous value.
  // NOTE(review): single-argument constructor is not `explicit` — presumably
  // intentional for brace-init at call sites; confirm before changing.
  ParallelGuard(bool state);
  // Restores the flag to the value it had before construction (RAII).
  ~ParallelGuard();

 private:
  bool previous_state_;
};
|
| 19 |
+
|
| 20 |
+
} // namespace c10
|
videochat2/lib/python3.10/site-packages/torch/include/c10/util/Registry.h
ADDED
|
@@ -0,0 +1,326 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#ifndef C10_UTIL_REGISTRY_H_
|
| 2 |
+
#define C10_UTIL_REGISTRY_H_
|
| 3 |
+
|
| 4 |
+
/**
|
| 5 |
+
* Simple registry implementation that uses static variables to
|
| 6 |
+
* register object creators during program initialization time.
|
| 7 |
+
*/
|
| 8 |
+
|
| 9 |
+
// NB: This Registry works poorly when you have other namespaces.
|
| 10 |
+
// Make all macro invocations from inside the at namespace.
|
| 11 |
+
|
| 12 |
+
#include <cstdio>
|
| 13 |
+
#include <cstdlib>
|
| 14 |
+
#include <functional>
|
| 15 |
+
#include <memory>
|
| 16 |
+
#include <mutex>
|
| 17 |
+
#include <stdexcept>
|
| 18 |
+
#include <string>
|
| 19 |
+
#include <unordered_map>
|
| 20 |
+
#include <vector>
|
| 21 |
+
|
| 22 |
+
#include <c10/macros/Export.h>
|
| 23 |
+
#include <c10/macros/Macros.h>
|
| 24 |
+
#include <c10/util/Type.h>
|
| 25 |
+
|
| 26 |
+
namespace c10 {
|
| 27 |
+
|
| 28 |
+
// Renders a registry key for use in diagnostic messages. The generic
// fallback emits a fixed placeholder, since arbitrary key types are not
// printable; the std::string specialization returns the key verbatim.
template <typename KeyType>
inline std::string KeyStrRepr(const KeyType& /*key*/) {
  const char* placeholder = "[key type printing not supported]";
  return placeholder;
}

template <>
inline std::string KeyStrRepr(const std::string& key) {
  std::string repr = key;
  return repr;
}
|
| 37 |
+
|
| 38 |
+
// Priority levels for registry entries. Registry::Register resolves duplicate
// keys by priority: strictly higher overwrites, equal is an error, lower is
// skipped.
enum RegistryPriority {
  REGISTRY_FALLBACK = 1,
  REGISTRY_DEFAULT = 2,
  REGISTRY_PREFERRED = 3,
};
|
| 43 |
+
|
| 44 |
+
/**
|
| 45 |
+
* @brief A template class that allows one to register classes by keys.
|
| 46 |
+
*
|
| 47 |
+
* The keys are usually a std::string specifying the name, but can be anything
|
| 48 |
+
* that can be used in a std::map.
|
| 49 |
+
*
|
| 50 |
+
* You should most likely not use the Registry class explicitly, but use the
|
| 51 |
+
* helper macros below to declare specific registries as well as registering
|
| 52 |
+
* objects.
|
| 53 |
+
*/
|
| 54 |
+
template <class SrcType, class ObjectPtrType, class... Args>
class Registry {
 public:
  // Factory signature: produces an ObjectPtrType from the registry's
  // creation arguments.
  typedef std::function<ObjectPtrType(Args...)> Creator;

  // If `warning` is true, a skipped registration (lower-priority item arriving
  // after a higher-priority one for the same key) is reported on stderr.
  Registry(bool warning = true) : registry_(), priority_(), warning_(warning) {}

  // Registers `creator` under `key`, guarded by register_mutex_. Duplicate
  // keys are resolved by priority: strictly higher overwrites; equal priority
  // is a hard error (std::exit(1), or throw when SetTerminate(false) was
  // called); strictly lower is ignored (optionally with a stderr warning).
  void Register(
      const SrcType& key,
      Creator creator,
      const RegistryPriority priority = REGISTRY_DEFAULT) {
    std::lock_guard<std::mutex> lock(register_mutex_);
    // The if statement below is essentially the same as the following line:
    // TORCH_CHECK_EQ(registry_.count(key), 0) << "Key " << key
    // << " registered twice.";
    // However, TORCH_CHECK_EQ depends on google logging, and since registration
    // is carried out at static initialization time, we do not want to have an
    // explicit dependency on glog's initialization function.
    if (registry_.count(key) != 0) {
      auto cur_priority = priority_[key];
      if (priority > cur_priority) {
#ifdef DEBUG
        std::string warn_msg =
            "Overwriting already registered item for key " + KeyStrRepr(key);
        fprintf(stderr, "%s\n", warn_msg.c_str());
#endif
        registry_[key] = creator;
        priority_[key] = priority;
      } else if (priority == cur_priority) {
        std::string err_msg =
            "Key already registered with the same priority: " + KeyStrRepr(key);
        fprintf(stderr, "%s\n", err_msg.c_str());
        if (terminate_) {
          std::exit(1);
        } else {
          throw std::runtime_error(err_msg);
        }
      } else if (warning_) {
        std::string warn_msg =
            "Higher priority item already registered, skipping registration of " +
            KeyStrRepr(key);
        fprintf(stderr, "%s\n", warn_msg.c_str());
      }
    } else {
      registry_[key] = creator;
      priority_[key] = priority;
    }
  }

  // Same as above, additionally storing a human-readable help message for the
  // key (retrievable via HelpMessage).
  void Register(
      const SrcType& key,
      Creator creator,
      const std::string& help_msg,
      const RegistryPriority priority = REGISTRY_DEFAULT) {
    Register(key, creator, priority);
    help_message_[key] = help_msg;
  }

  // True if a creator is registered under `key`.
  // NOTE(review): lookups are not guarded by register_mutex_ — presumably all
  // registration happens during static init before concurrent use; confirm.
  inline bool Has(const SrcType& key) {
    return (registry_.count(key) != 0);
  }

  // Invokes the creator registered under `key` with `args`, returning the
  // created object, or nullptr when the key is unknown.
  ObjectPtrType Create(const SrcType& key, Args... args) {
    auto it = registry_.find(key);
    if (it == registry_.end()) {
      // Returns nullptr if the key is not registered.
      return nullptr;
    }
    return it->second(args...);
  }

  /**
   * Returns the keys currently registered as a std::vector.
   */
  std::vector<SrcType> Keys() const {
    std::vector<SrcType> keys;
    keys.reserve(registry_.size());
    for (const auto& it : registry_) {
      keys.push_back(it.first);
    }
    return keys;
  }

  // Full key -> help message map.
  inline const std::unordered_map<SrcType, std::string>& HelpMessage() const {
    return help_message_;
  }

  // Help message for one key, or nullptr if none was registered for it.
  const char* HelpMessage(const SrcType& key) const {
    auto it = help_message_.find(key);
    if (it == help_message_.end()) {
      return nullptr;
    }
    return it->second.c_str();
  }

  // Used for testing, if terminate is unset, Registry throws instead of
  // calling std::exit
  void SetTerminate(bool terminate) {
    terminate_ = terminate;
  }

 private:
  std::unordered_map<SrcType, Creator> registry_;
  std::unordered_map<SrcType, RegistryPriority> priority_;
  bool terminate_{true};
  const bool warning_;
  std::unordered_map<SrcType, std::string> help_message_;
  std::mutex register_mutex_;

  C10_DISABLE_COPY_AND_ASSIGN(Registry);
};
|
| 165 |
+
|
| 166 |
+
// Helper whose constructors perform a registration as a side effect.
// Instances are declared as static variables by the C10_REGISTER_* macros, so
// registration happens at static initialization time.
template <class SrcType, class ObjectPtrType, class... Args>
class Registerer {
 public:
  // Registers `creator` (with optional help message) at default priority.
  explicit Registerer(
      const SrcType& key,
      Registry<SrcType, ObjectPtrType, Args...>* registry,
      typename Registry<SrcType, ObjectPtrType, Args...>::Creator creator,
      const std::string& help_msg = "") {
    registry->Register(key, creator, help_msg);
  }

  // Same, with an explicit RegistryPriority.
  explicit Registerer(
      const SrcType& key,
      const RegistryPriority priority,
      Registry<SrcType, ObjectPtrType, Args...>* registry,
      typename Registry<SrcType, ObjectPtrType, Args...>::Creator creator,
      const std::string& help_msg = "") {
    registry->Register(key, creator, help_msg, priority);
  }

  // Default factory used by C10_REGISTER_TYPED_CLASS: heap-constructs
  // DerivedType from the registry's argument types and wraps the raw pointer
  // in ObjectPtrType (e.g. std::unique_ptr or std::shared_ptr).
  template <class DerivedType>
  static ObjectPtrType DefaultCreator(Args... args) {
    return ObjectPtrType(new DerivedType(args...));
  }
};
|
| 191 |
+
|
| 192 |
+
/**
|
| 193 |
+
* C10_DECLARE_TYPED_REGISTRY is a macro that expands to a function
|
| 194 |
+
* declaration, as well as creating a convenient typename for its corresponding
|
| 195 |
+
* registerer.
|
| 196 |
+
*/
|
| 197 |
+
// Note on C10_IMPORT and C10_EXPORT below: we need to explicitly mark DECLARE
|
| 198 |
+
// as import and DEFINE as export, because these registry macros will be used
|
| 199 |
+
// in downstream shared libraries as well, and one cannot use *_API - the API
|
| 200 |
+
// macro will be defined on a per-shared-library basis. Semantically, when one
|
| 201 |
+
// declares a typed registry it is always going to be IMPORT, and when one
|
| 202 |
+
// defines a registry (which should happen ONLY ONCE and ONLY IN SOURCE FILE),
|
| 203 |
+
// the instantiation unit is always going to be exported.
|
| 204 |
+
//
|
| 205 |
+
// The only unique condition is when in the same file one does DECLARE and
|
| 206 |
+
// DEFINE - in Windows compilers, this generates a warning that dllimport and
|
| 207 |
+
// dllexport are mixed, but the warning is fine and linker will be properly
|
| 208 |
+
// exporting the symbol. Same thing happens in the gflags flag declaration and
|
| 209 |
+
// definition cases.
|
| 210 |
+
#define C10_DECLARE_TYPED_REGISTRY( \
|
| 211 |
+
RegistryName, SrcType, ObjectType, PtrType, ...) \
|
| 212 |
+
C10_API ::c10::Registry<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>* \
|
| 213 |
+
RegistryName(); \
|
| 214 |
+
typedef ::c10::Registerer<SrcType, PtrType<ObjectType>, ##__VA_ARGS__> \
|
| 215 |
+
Registerer##RegistryName
|
| 216 |
+
|
| 217 |
+
#define TORCH_DECLARE_TYPED_REGISTRY( \
|
| 218 |
+
RegistryName, SrcType, ObjectType, PtrType, ...) \
|
| 219 |
+
TORCH_API ::c10::Registry<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>* \
|
| 220 |
+
RegistryName(); \
|
| 221 |
+
typedef ::c10::Registerer<SrcType, PtrType<ObjectType>, ##__VA_ARGS__> \
|
| 222 |
+
Registerer##RegistryName
|
| 223 |
+
|
| 224 |
+
#define C10_DEFINE_TYPED_REGISTRY( \
|
| 225 |
+
RegistryName, SrcType, ObjectType, PtrType, ...) \
|
| 226 |
+
C10_EXPORT ::c10::Registry<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>* \
|
| 227 |
+
RegistryName() { \
|
| 228 |
+
static ::c10::Registry<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>* \
|
| 229 |
+
registry = new ::c10:: \
|
| 230 |
+
Registry<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>(); \
|
| 231 |
+
return registry; \
|
| 232 |
+
}
|
| 233 |
+
|
| 234 |
+
#define C10_DEFINE_TYPED_REGISTRY_WITHOUT_WARNING( \
|
| 235 |
+
RegistryName, SrcType, ObjectType, PtrType, ...) \
|
| 236 |
+
C10_EXPORT ::c10::Registry<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>* \
|
| 237 |
+
RegistryName() { \
|
| 238 |
+
static ::c10::Registry<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>* \
|
| 239 |
+
registry = \
|
| 240 |
+
new ::c10::Registry<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>( \
|
| 241 |
+
false); \
|
| 242 |
+
return registry; \
|
| 243 |
+
}
|
| 244 |
+
|
| 245 |
+
// Note(Yangqing): The __VA_ARGS__ below allows one to specify a templated
// creator with comma in its templated arguments.
//
// Each macro instantiates a file-local Registerer object whose constructor
// performs the registration at static-initialization time.

// Registers an explicit creator function under `key`.
#define C10_REGISTER_TYPED_CREATOR(RegistryName, key, ...)                  \
  static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( \
      key, RegistryName(), ##__VA_ARGS__);

// As above, but also records a registration priority.
#define C10_REGISTER_TYPED_CREATOR_WITH_PRIORITY(                           \
    RegistryName, key, priority, ...)                                       \
  static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( \
      key, priority, RegistryName(), ##__VA_ARGS__);

// Registers a class by synthesizing a DefaultCreator for it; the demangled
// type name is stored alongside for diagnostics.
#define C10_REGISTER_TYPED_CLASS(RegistryName, key, ...)                    \
  static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( \
      key,                                                                  \
      RegistryName(),                                                       \
      Registerer##RegistryName::DefaultCreator<__VA_ARGS__>,                \
      ::c10::demangle_type<__VA_ARGS__>());

// As above, but also records a registration priority.
#define C10_REGISTER_TYPED_CLASS_WITH_PRIORITY(                             \
    RegistryName, key, priority, ...)                                       \
  static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( \
      key,                                                                  \
      priority,                                                             \
      RegistryName(),                                                       \
      Registerer##RegistryName::DefaultCreator<__VA_ARGS__>,                \
      ::c10::demangle_type<__VA_ARGS__>());
|
| 271 |
+
|
| 272 |
+
// C10_DECLARE_REGISTRY and C10_DEFINE_REGISTRY are hard-wired to use
// std::string as the key type, because that is the most commonly used cases.
// The plain variants hold std::unique_ptr values; the *_SHARED_* variants
// hold std::shared_ptr values.
#define C10_DECLARE_REGISTRY(RegistryName, ObjectType, ...) \
  C10_DECLARE_TYPED_REGISTRY(                               \
      RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__)

#define TORCH_DECLARE_REGISTRY(RegistryName, ObjectType, ...) \
  TORCH_DECLARE_TYPED_REGISTRY(                               \
      RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__)

#define C10_DEFINE_REGISTRY(RegistryName, ObjectType, ...) \
  C10_DEFINE_TYPED_REGISTRY(                               \
      RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__)

#define C10_DEFINE_REGISTRY_WITHOUT_WARNING(RegistryName, ObjectType, ...) \
  C10_DEFINE_TYPED_REGISTRY_WITHOUT_WARNING(                               \
      RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__)

#define C10_DECLARE_SHARED_REGISTRY(RegistryName, ObjectType, ...) \
  C10_DECLARE_TYPED_REGISTRY(                                      \
      RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__)

#define TORCH_DECLARE_SHARED_REGISTRY(RegistryName, ObjectType, ...) \
  TORCH_DECLARE_TYPED_REGISTRY(                                      \
      RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__)

#define C10_DEFINE_SHARED_REGISTRY(RegistryName, ObjectType, ...) \
  C10_DEFINE_TYPED_REGISTRY(                                      \
      RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__)

#define C10_DEFINE_SHARED_REGISTRY_WITHOUT_WARNING( \
    RegistryName, ObjectType, ...)                  \
  C10_DEFINE_TYPED_REGISTRY_WITHOUT_WARNING(        \
      RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__)

// C10_REGISTER_CREATOR and C10_REGISTER_CLASS are hard-wired to use std::string
// as the key
// type, because that is the most commonly used cases.
// Note the stringization (#key): callers pass a bare identifier.
#define C10_REGISTER_CREATOR(RegistryName, key, ...) \
  C10_REGISTER_TYPED_CREATOR(RegistryName, #key, __VA_ARGS__)

#define C10_REGISTER_CREATOR_WITH_PRIORITY(RegistryName, key, priority, ...) \
  C10_REGISTER_TYPED_CREATOR_WITH_PRIORITY(                                  \
      RegistryName, #key, priority, __VA_ARGS__)

#define C10_REGISTER_CLASS(RegistryName, key, ...) \
  C10_REGISTER_TYPED_CLASS(RegistryName, #key, __VA_ARGS__)

#define C10_REGISTER_CLASS_WITH_PRIORITY(RegistryName, key, priority, ...) \
  C10_REGISTER_TYPED_CLASS_WITH_PRIORITY(                                  \
      RegistryName, #key, priority, __VA_ARGS__)
|
| 323 |
+
|
| 324 |
+
} // namespace c10
|
| 325 |
+
|
| 326 |
+
#endif // C10_UTIL_REGISTRY_H_
|
videochat2/lib/python3.10/site-packages/torch/include/c10/util/ScopeExit.h
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <type_traits>
|
| 4 |
+
#include <utility>
|
| 5 |
+
|
| 6 |
+
namespace c10 {

/**
 * Mostly copied from https://llvm.org/doxygen/ScopeExit_8h_source.html
 *
 * Holds a callable and invokes it when this object is destroyed, unless
 * the guard was released or moved from first.
 */
template <typename Callable>
class scope_exit {
  Callable callback_;
  bool armed_ = true; // cleared by release() and on the moved-from side

 public:
  template <typename Fp>
  // NOLINTNEXTLINE(bugprone-forwarding-reference-overload)
  explicit scope_exit(Fp&& fn) : callback_(std::forward<Fp>(fn)) {}

  // Transfers responsibility for running the callback; the source guard
  // is disarmed so the callback runs exactly once.
  scope_exit(scope_exit&& other) noexcept
      : callback_(std::move(other.callback_)), armed_(other.armed_) {
    other.release();
  }
  scope_exit(const scope_exit&) = delete;
  scope_exit& operator=(scope_exit&&) = delete;
  scope_exit& operator=(const scope_exit&) = delete;

  // Disarms the guard: the callback will not run at destruction.
  void release() {
    armed_ = false;
  }

  ~scope_exit() {
    if (armed_) {
      callback_();
    }
  }
};

// Keeps the callable object that is passed in, and execute it at the
// destruction of the returned object (usually at the scope exit where the
// returned object is kept).
//
// Interface is specified by p0052r2.
template <typename Callable>
scope_exit<std::decay_t<Callable>> make_scope_exit(Callable&& fn) {
  return scope_exit<std::decay_t<Callable>>(std::forward<Callable>(fn));
}

} // namespace c10
|
videochat2/lib/python3.10/site-packages/torch/include/c10/util/SmallBuffer.h
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <array>
|
| 3 |
+
#include <cstddef>
|
| 4 |
+
#include <cstdint>
|
| 5 |
+
#include <type_traits>
|
| 6 |
+
|
| 7 |
+
/** Helper class for allocating temporary fixed size arrays with SBO.
|
| 8 |
+
*
|
| 9 |
+
* This is intentionally much simpler than SmallVector, to improve performance
|
| 10 |
+
* at the expense of many features:
|
| 11 |
+
* - No zero-initialization for numeric types
|
| 12 |
+
* - No resizing after construction
|
| 13 |
+
* - No copy/move
|
| 14 |
+
* - No non-trivial types
|
| 15 |
+
*/
|
| 16 |
+
|
| 17 |
+
namespace c10 {

// Fixed-size temporary array with small-buffer optimization: sizes up to
// N live in inline storage, larger sizes fall back to a heap allocation.
// See the file-level comment for the intentional feature restrictions.
template <typename T, size_t N>
class SmallBuffer {
  static_assert(std::is_trivial_v<T>, "SmallBuffer is intended for POD types");

  std::array<T, N> storage_;
  size_t size_{};
  T* data_{};

 public:
  // Elements are NOT zero-initialized (by design, for speed).
  SmallBuffer(size_t size) : size_(size) {
    data_ = size > N ? new T[size] : &storage_[0];
  }

  SmallBuffer(const SmallBuffer&) = delete;
  SmallBuffer& operator=(const SmallBuffer&) = delete;

  // move constructor is needed in function return
  SmallBuffer(SmallBuffer&& other) noexcept : size_{other.size_} {
    other.size_ = 0;
    if (size_ > N) {
      // Heap case: steal the allocation.
      data_ = other.data_;
      other.data_ = nullptr;
    } else {
      // Inline case: copy the elements and point at our own storage.
      storage_ = std::move(other.storage_);
      data_ = &storage_[0];
    }
  }

  SmallBuffer& operator=(SmallBuffer&&) = delete;

  ~SmallBuffer() {
    if (size_ > N) {
      delete[] data_;
    }
  }

  T& operator[](size_t idx) {
    return data()[idx];
  }
  const T& operator[](size_t idx) const {
    return data()[idx];
  }
  T* data() {
    return data_;
  }
  const T* data() const {
    return data_;
  }
  size_t size() const {
    return size_;
  }
  T* begin() {
    return data_;
  }
  const T* begin() const {
    return data_;
  }
  T* end() {
    return data_ + size_;
  }
  const T* end() const {
    return data_ + size_;
  }
};

} // namespace c10
|
videochat2/lib/python3.10/site-packages/torch/include/c10/util/StringUtil.h
ADDED
|
@@ -0,0 +1,211 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#ifndef C10_UTIL_STRINGUTIL_H_
|
| 2 |
+
#define C10_UTIL_STRINGUTIL_H_
|
| 3 |
+
|
| 4 |
+
#include <c10/macros/Macros.h>
#include <c10/util/string_utils.h>
#include <c10/util/string_view.h>

#include <cstddef>
#include <cstdint>
#include <ostream>
#include <sstream>
#include <string>
|
| 12 |
+
|
| 13 |
+
C10_CLANG_DIAGNOSTIC_PUSH()
|
| 14 |
+
#if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32")
|
| 15 |
+
C10_CLANG_DIAGNOSTIC_IGNORE("-Wshorten-64-to-32")
|
| 16 |
+
#endif
|
| 17 |
+
|
| 18 |
+
namespace c10 {
|
| 19 |
+
|
| 20 |
+
namespace detail {
|
| 21 |
+
|
| 22 |
+
// Obtains the base name from a full path.
|
| 23 |
+
C10_API std::string StripBasename(const std::string& full_path);
|
| 24 |
+
|
| 25 |
+
C10_API std::string ExcludeFileExtension(const std::string& full_path);
|
| 26 |
+
|
| 27 |
+
struct CompileTimeEmptyString {
|
| 28 |
+
operator const std::string&() const {
|
| 29 |
+
static const std::string empty_string_literal;
|
| 30 |
+
return empty_string_literal;
|
| 31 |
+
}
|
| 32 |
+
operator const char*() const {
|
| 33 |
+
return "";
|
| 34 |
+
}
|
| 35 |
+
};
|
| 36 |
+
|
| 37 |
+
template <typename T>
|
| 38 |
+
struct CanonicalizeStrTypes {
|
| 39 |
+
using type = const T&;
|
| 40 |
+
};
|
| 41 |
+
|
| 42 |
+
template <size_t N>
|
| 43 |
+
// NOLINTNEXTLINE(*c-arrays*)
|
| 44 |
+
struct CanonicalizeStrTypes<char[N]> {
|
| 45 |
+
using type = const char*;
|
| 46 |
+
};
|
| 47 |
+
|
| 48 |
+
inline std::ostream& _str(std::ostream& ss) {
|
| 49 |
+
return ss;
|
| 50 |
+
}
|
| 51 |
+
|
| 52 |
+
template <typename T>
|
| 53 |
+
inline std::ostream& _str(std::ostream& ss, const T& t) {
|
| 54 |
+
// NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage)
|
| 55 |
+
ss << t;
|
| 56 |
+
return ss;
|
| 57 |
+
}
|
| 58 |
+
|
| 59 |
+
// Overloads of _str for wide types; forces narrowing.
|
| 60 |
+
C10_API std::ostream& _str(std::ostream& ss, const wchar_t* wCStr);
|
| 61 |
+
C10_API std::ostream& _str(std::ostream& ss, const wchar_t& wChar);
|
| 62 |
+
C10_API std::ostream& _str(std::ostream& ss, const std::wstring& wString);
|
| 63 |
+
|
| 64 |
+
template <>
|
| 65 |
+
inline std::ostream& _str<CompileTimeEmptyString>(
|
| 66 |
+
std::ostream& ss,
|
| 67 |
+
const CompileTimeEmptyString&) {
|
| 68 |
+
return ss;
|
| 69 |
+
}
|
| 70 |
+
|
| 71 |
+
template <typename T, typename... Args>
|
| 72 |
+
inline std::ostream& _str(std::ostream& ss, const T& t, const Args&... args) {
|
| 73 |
+
return _str(_str(ss, t), args...);
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
template <typename... Args>
|
| 77 |
+
struct _str_wrapper final {
|
| 78 |
+
static std::string call(const Args&... args) {
|
| 79 |
+
std::ostringstream ss;
|
| 80 |
+
_str(ss, args...);
|
| 81 |
+
return ss.str();
|
| 82 |
+
}
|
| 83 |
+
};
|
| 84 |
+
|
| 85 |
+
// Specializations for already-a-string types.
|
| 86 |
+
template <>
|
| 87 |
+
struct _str_wrapper<std::string> final {
|
| 88 |
+
// return by reference to avoid the binary size of a string copy
|
| 89 |
+
static const std::string& call(const std::string& str) {
|
| 90 |
+
return str;
|
| 91 |
+
}
|
| 92 |
+
};
|
| 93 |
+
|
| 94 |
+
template <>
|
| 95 |
+
struct _str_wrapper<const char*> final {
|
| 96 |
+
static const char* call(const char* str) {
|
| 97 |
+
return str;
|
| 98 |
+
}
|
| 99 |
+
};
|
| 100 |
+
|
| 101 |
+
// For c10::str() with an empty argument list (which is common in our assert
|
| 102 |
+
// macros), we don't want to pay the binary size for constructing and
|
| 103 |
+
// destructing a stringstream or even constructing a string.
|
| 104 |
+
template <>
|
| 105 |
+
struct _str_wrapper<> final {
|
| 106 |
+
static CompileTimeEmptyString call() {
|
| 107 |
+
return CompileTimeEmptyString();
|
| 108 |
+
}
|
| 109 |
+
};
|
| 110 |
+
|
| 111 |
+
} // namespace detail
|
| 112 |
+
|
| 113 |
+
// Convert a list of string-like arguments into a single string.
|
| 114 |
+
template <typename... Args>
|
| 115 |
+
inline decltype(auto) str(const Args&... args) {
|
| 116 |
+
return detail::_str_wrapper<
|
| 117 |
+
typename detail::CanonicalizeStrTypes<Args>::type...>::call(args...);
|
| 118 |
+
}
|
| 119 |
+
|
| 120 |
+
// Joins the elements of `v` into a single string, writing `delimiter`
// between consecutive elements (and not after the last one).
template <class Container>
inline std::string Join(const std::string& delimiter, const Container& v) {
  std::stringstream s;
  // Count of elements that still need a trailing delimiter. Kept in a
  // 64-bit integer: the previous `int cnt = static_cast<int64_t>(...)`
  // silently narrowed back to int, overflowing for containers with more
  // than INT_MAX elements.
  int64_t cnt = static_cast<int64_t>(v.size()) - 1;
  for (auto i = v.begin(); i != v.end(); ++i, --cnt) {
    s << (*i) << (cnt ? delimiter : "");
  }
  return s.str();
}
|
| 129 |
+
|
| 130 |
+
// Replace all occurrences of "from" substring to "to" string.
|
| 131 |
+
// Returns number of replacements
|
| 132 |
+
size_t C10_API
|
| 133 |
+
ReplaceAll(std::string& s, c10::string_view from, c10::string_view to);
|
| 134 |
+
|
| 135 |
+
/// Represents a location in source code (for debugging).
|
| 136 |
+
struct C10_API SourceLocation {
|
| 137 |
+
const char* function;
|
| 138 |
+
const char* file;
|
| 139 |
+
uint32_t line;
|
| 140 |
+
};
|
| 141 |
+
|
| 142 |
+
std::ostream& operator<<(std::ostream& out, const SourceLocation& loc);
|
| 143 |
+
|
| 144 |
+
// unix isprint but insensitive to locale
|
| 145 |
+
// unix isprint but insensitive to locale: true exactly for the printable
// ASCII range 0x20..0x7e (bytes with the high bit set are not printable).
inline bool isPrint(char s) {
  const auto c = static_cast<unsigned char>(s);
  return c >= 0x20 && c <= 0x7e;
}
|
| 148 |
+
|
| 149 |
+
inline void printQuotedString(std::ostream& stmt, const string_view str) {
|
| 150 |
+
stmt << "\"";
|
| 151 |
+
for (auto s : str) {
|
| 152 |
+
switch (s) {
|
| 153 |
+
case '\\':
|
| 154 |
+
stmt << "\\\\";
|
| 155 |
+
break;
|
| 156 |
+
case '\'':
|
| 157 |
+
stmt << "\\'";
|
| 158 |
+
break;
|
| 159 |
+
case '\"':
|
| 160 |
+
stmt << "\\\"";
|
| 161 |
+
break;
|
| 162 |
+
case '\a':
|
| 163 |
+
stmt << "\\a";
|
| 164 |
+
break;
|
| 165 |
+
case '\b':
|
| 166 |
+
stmt << "\\b";
|
| 167 |
+
break;
|
| 168 |
+
case '\f':
|
| 169 |
+
stmt << "\\f";
|
| 170 |
+
break;
|
| 171 |
+
case '\n':
|
| 172 |
+
stmt << "\\n";
|
| 173 |
+
break;
|
| 174 |
+
case '\r':
|
| 175 |
+
stmt << "\\r";
|
| 176 |
+
break;
|
| 177 |
+
case '\t':
|
| 178 |
+
stmt << "\\t";
|
| 179 |
+
break;
|
| 180 |
+
case '\v':
|
| 181 |
+
stmt << "\\v";
|
| 182 |
+
break;
|
| 183 |
+
default:
|
| 184 |
+
if (isPrint(s)) {
|
| 185 |
+
stmt << s;
|
| 186 |
+
} else {
|
| 187 |
+
// C++ io has stateful formatting settings. Messing with
|
| 188 |
+
// them is probably worse than doing this manually.
|
| 189 |
+
// NOLINTNEXTLINE(*c-arrays*)
|
| 190 |
+
char buf[4] = "000";
|
| 191 |
+
// NOLINTNEXTLINE(*narrowing-conversions)
|
| 192 |
+
buf[2] += s % 8;
|
| 193 |
+
s /= 8;
|
| 194 |
+
// NOLINTNEXTLINE(*narrowing-conversions)
|
| 195 |
+
buf[1] += s % 8;
|
| 196 |
+
s /= 8;
|
| 197 |
+
// NOLINTNEXTLINE(*narrowing-conversions)
|
| 198 |
+
buf[0] += s;
|
| 199 |
+
stmt << "\\" << buf;
|
| 200 |
+
}
|
| 201 |
+
break;
|
| 202 |
+
}
|
| 203 |
+
}
|
| 204 |
+
stmt << "\"";
|
| 205 |
+
}
|
| 206 |
+
|
| 207 |
+
} // namespace c10
|
| 208 |
+
|
| 209 |
+
C10_CLANG_DIAGNOSTIC_POP()
|
| 210 |
+
|
| 211 |
+
#endif // C10_UTIL_STRINGUTIL_H_
|
videochat2/lib/python3.10/site-packages/torch/include/c10/util/Synchronized.h
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <mutex>
|
| 4 |
+
|
| 5 |
+
namespace c10 {

/**
 * A very simple Synchronization class for error-free use of data
 * in a multi-threaded context. See folly/docs/Synchronized.md for
 * the inspiration of this class.
 *
 * Full URL:
 * https://github.com/facebook/folly/blob/main/folly/docs/Synchronized.md
 *
 * This class implements a small subset of the generic functionality
 * implemented by folly:Synchronized<T>. Specifically, only withLock<T>
 * is implemented here since it's the smallest possible API that is
 * able to cover a large surface area of functionality offered by
 * folly::Synchronized<T>.
 */
template <typename T>
class Synchronized final {
  mutable std::mutex mutex_;
  T data_;

 public:
  Synchronized() = default;
  Synchronized(T const& data) : data_(data) {}
  Synchronized(T&& data) : data_(std::move(data)) {}

  // Don't permit copy construction, move, assignment, or
  // move assignment, since the underlying std::mutex
  // isn't necessarily copyable/moveable.
  // (Fixed: the deleted assignment operators previously returned
  // Synchronized by value; deleted functions are never called, but the
  // canonical signature returns a reference.)
  Synchronized(Synchronized const&) = delete;
  Synchronized(Synchronized&&) = delete;
  Synchronized& operator=(Synchronized const&) = delete;
  Synchronized& operator=(Synchronized&&) = delete;

  /**
   * To use, call withLock<T> with a callback that accepts T either
   * by copy or by reference. Use the protected variable in the
   * provided callback safely. The mutex is held for the duration of
   * the callback and released when it returns (or throws).
   */
  template <typename CB>
  auto withLock(CB&& cb) {
    std::lock_guard<std::mutex> guard(this->mutex_);
    return std::forward<CB>(cb)(this->data_);
  }

  /**
   * To use, call withLock<T> with a callback that accepts T either
   * by copy or by const reference. Use the protected variable in
   * the provided callback safely.
   */
  template <typename CB>
  auto withLock(CB&& cb) const {
    std::lock_guard<std::mutex> guard(this->mutex_);
    return std::forward<CB>(cb)(this->data_);
  }
};
} // end namespace c10
|