python_code (string, 0–1.02M chars) | repo_name (string, 9–48 chars) | file_path (string, 5–114 chars) |
|---|---|---|
import torch._C._lazy
def render_ir_graph(tensors):
"""Return a text dump of the LTC IR graph in dot format for the tensors.
    The text can be processed by tools like dot to be rendered as PDF, PNG, etc."""
return torch._C._lazy._get_tensors_dot(tensors)
def dump_ir(tensors, ir_format):
"""Return a dump... | pytorch-master | torch/_lazy/debug.py |
import copy
import dataclasses
import itertools
import os
from typing import Any, Callable, Dict, List
import torch
import torch._lazy as lazy
import torch._lazy.metrics as metrics
from torch import fx
from torch._lazy import computation, debug as lazy_debug
from torch._lazy.tensor_factory_functions import tensor_fact... | pytorch-master | torch/_lazy/extract_compiled_graph.py |
import torch._C._lazy
def dump(dot_file_name: str):
"""Dump TrieCache in the dot format"""
return torch._C._lazy._dump_ir_cache(dot_file_name)
def reset():
"""Clear TrieCache. This is needed in testing to avoid
node reusing between different tests.
"""
return torch._C._lazy._clear_ir_cache()... | pytorch-master | torch/_lazy/ir_cache.py |
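The two cache helpers above are typically paired in tests: clear the TrieCache before a test so IR nodes are not reused, and optionally dump it afterwards for inspection. A short sketch:

```python
import torch._lazy.ir_cache as ir_cache

ir_cache.reset()                 # clear the TrieCache so nothing is reused across tests

# ... run some lazy-tensor computation here ...

ir_cache.dump("trie_cache.dot")  # write the cache contents in dot format
```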
import torch._C._lazy_ts_backend
def init():
"""Initializes the lazy Torchscript backend"""
torch._C._lazy_ts_backend._init()
| pytorch-master | torch/_lazy/ts_backend.py |
import torch._C._lazy
import torch._C._lazy_ts_backend
def get_tensors_ts_device_data_node(tensors):
"""Return tensor ids and eager tensors for DeviceData nodes in the
IR for the passed in lazy tensors.
TODO: This API is currently ts backend specific. We are working on
generalizing it to all backends... | pytorch-master | torch/_lazy/computation.py |
# -*- coding: utf-8 -*-
import warnings
# A workaround to support both TorchScript and MyPy:
from typing import Any, List, Optional, Tuple, TYPE_CHECKING, Union
import torch
from torch import Tensor
from . import _docs
if TYPE_CHECKING:
from torch.types import _dtype as DType
DimOrDims = Optional[Union[int... | pytorch-master | torch/_masked/__init__.py |
# -*- coding: utf-8 -*-
# This file is generated, do not modify it!
#
# To update this file, run the update masked docs script as follows:
#
# python tools/update_masked_docs.py
#
# The script must be called from an environment where the development
# version of torch package can be imported and is functional.
#
ama... | pytorch-master | torch/_masked/_docs.py |
| pytorch-master | torch/ao/__init__.py |
# Variables
from ._mappings import get_dynamic_sparse_quantized_mapping
from ._mappings import get_static_sparse_quantized_mapping
# Sparsifier
from .sparsifier.base_sparsifier import BaseSparsifier
from .sparsifier.weight_norm_sparsifier import WeightNormSparsifier
from .sparsifier.nearly_diagonal_sparsifier import N... | pytorch-master | torch/ao/sparsity/__init__.py |
import torch
import torch.ao.nn
def get_static_sparse_quantized_mapping():
_static_sparse_quantized_mapping = dict({
torch.nn.Linear: torch.ao.nn.sparse.quantized.Linear,
})
return _static_sparse_quantized_mapping
def get_dynamic_sparse_quantized_mapping():
_dynamic_sparse_quantized_mapping = ... | pytorch-master | torch/ao/sparsity/_mappings.py |
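A small sketch of how a caller might consume the static mapping helper above (the dynamic mapping is truncated here, so its contents are not assumed):

```python
import torch
from torch.ao.sparsity import get_static_sparse_quantized_mapping

mapping = get_static_sparse_quantized_mapping()
# Dense float module type -> sparse quantized counterpart, per the snippet above.
print(mapping[torch.nn.Linear])  # torch.ao.nn.sparse.quantized.Linear
```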
from functools import reduce
from typing import Tuple
import torch
import torch.nn.functional as F
from .base_sparsifier import BaseSparsifier
__all__ = ["WeightNormSparsifier"]
def _flat_idx_to_2d(idx, shape):
rows = idx // shape[1]
cols = idx % shape[1]
return rows, cols
class WeightNormSparsifier(Ba... | pytorch-master | torch/ao/sparsity/sparsifier/weight_norm_sparsifier.py |
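A quick worked example of the `_flat_idx_to_2d` helper above: for a row-major matrix of shape `(3, 4)`, flat index 7 lands in row 1, column 3.

```python
def _flat_idx_to_2d(idx, shape):
    rows = idx // shape[1]
    cols = idx % shape[1]
    return rows, cols

assert _flat_idx_to_2d(0, (3, 4)) == (0, 0)
assert _flat_idx_to_2d(7, (3, 4)) == (1, 3)   # 7 = 1 * 4 + 3
assert _flat_idx_to_2d(11, (3, 4)) == (2, 3)  # last element of the matrix
```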
| pytorch-master | torch/ao/sparsity/sparsifier/__init__.py |
from typing import Any, Dict, Optional
from torch import nn
__all__ = [
"module_to_fqn",
"fqn_to_module",
"get_arg_info_from_tensor_fqn",
"FakeSparsity",
]
def module_to_fqn(model: nn.Module, module: nn.Module, prefix: str = "") -> Optional[str]:
"""
Returns the fqn for a module or None if m... | pytorch-master | torch/ao/sparsity/sparsifier/utils.py |
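Based on the docstring above, a minimal sketch of how `module_to_fqn` might be used; the exact string returned for nested containers is an assumption.

```python
import torch.nn as nn
from torch.ao.sparsity.sparsifier.utils import module_to_fqn

model = nn.Sequential(nn.Linear(4, 4), nn.ReLU())

# Returns the dotted path of the submodule inside `model`,
# or None if the submodule does not belong to the model.
print(module_to_fqn(model, model[0]))          # e.g. "0"
print(module_to_fqn(model, nn.Linear(2, 2)))   # None: not part of the model
```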
import torch
from . import base_sparsifier
class NearlyDiagonalSparsifier(base_sparsifier.BaseSparsifier):
r"""Nearly Diagonal Sparsifier
This sparsifier creates a nearly diagonal mask to be applied to the weight matrix.
Nearly Diagonal Matrix is a matrix that contains non-zero elements near the diagona... | pytorch-master | torch/ao/sparsity/sparsifier/nearly_diagonal_sparsifier.py |
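To make the "nearly diagonal" idea concrete, here is a hand-built 5x5 mask with a band of width 3. This only illustrates the concept described in the docstring; it is not the sparsifier's actual output.

```python
import torch

# Keep entries within one position of the diagonal, zero everything else.
n, half_band = 5, 1
rows = torch.arange(n).unsqueeze(1)
cols = torch.arange(n).unsqueeze(0)
mask = ((rows - cols).abs() <= half_band).int()

print(mask)
# tensor([[1, 1, 0, 0, 0],
#         [1, 1, 1, 0, 0],
#         [0, 1, 1, 1, 0],
#         [0, 0, 1, 1, 1],
#         [0, 0, 0, 1, 1]])
```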
import abc
import copy
from collections import defaultdict
from typing import Any, Dict, Optional, Set, Tuple, List, Type
import torch
from torch import nn
from torch.nn.utils import parametrize
from .utils import (
FakeSparsity,
get_arg_info_from_tensor_fqn,
module_to_fqn,
)
__all__ = ["BaseSparsifier"]... | pytorch-master | torch/ao/sparsity/sparsifier/base_sparsifier.py |
| pytorch-master | torch/ao/sparsity/_experimental/__init__.py |
import abc
import torch
from typing import Optional, Tuple, List, Any, Dict
from ...sparsifier import base_sparsifier
from collections import defaultdict
from torch import nn
import copy
from ...sparsifier import utils
from torch.nn.utils import parametrize
import sys
import warnings
if not sys.warnoptions:
# to s... | pytorch-master | torch/ao/sparsity/_experimental/data_sparsifier/base_data_sparsifier.py |
from .base_data_sparsifier import BaseDataSparsifier
from .data_norm_sparsifier import DataNormSparsifier
__all__ = [
"BaseDataSparsifier",
"DataNormSparsifier",
]
| pytorch-master | torch/ao/sparsity/_experimental/data_sparsifier/__init__.py |
import torch
from torch.nn import functional as F
from functools import reduce
from typing import Tuple, Any, List
from .base_data_sparsifier import BaseDataSparsifier
__all__ = ['DataNormSparsifier']
class DataNormSparsifier(BaseDataSparsifier):
r"""L1-Norm Sparsifier
This sparsifier computes the *L1-norm*... | pytorch-master | torch/ao/sparsity/_experimental/data_sparsifier/data_norm_sparsifier.py |
import torch
import torch.nn as nn
from torch.ao.sparsity.sparsifier.utils import module_to_fqn, fqn_to_module
from typing import Dict, List
SUPPORTED_MODULES = {
nn.Embedding,
nn.EmbeddingBag
}
def _fetch_all_embeddings(model):
"""Fetches Embedding and EmbeddingBag modules from the model
"""
emb... | pytorch-master | torch/ao/sparsity/_experimental/data_sparsifier/quantization_utils.py |
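`_fetch_all_embeddings` is truncated above; the following is only a generic sketch of how embedding layers can be collected with `named_modules`, using a hypothetical helper name, and is not this file's actual body.

```python
import torch.nn as nn

SUPPORTED_MODULES = {nn.Embedding, nn.EmbeddingBag}

def _collect_embeddings(model: nn.Module):
    # Hypothetical helper: pair each supported embedding layer with its name.
    return [(name, m) for name, m in model.named_modules()
            if type(m) in SUPPORTED_MODULES]

model = nn.Sequential(nn.EmbeddingBag(10, 3), nn.Linear(3, 1))
print(_collect_embeddings(model))  # [('0', EmbeddingBag(...))]
```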
| pytorch-master | torch/ao/sparsity/_experimental/data_sparsifier/lightning/__init__.py |
from collections import defaultdict
from copy import deepcopy
import torch
from typing import Any, Optional, Dict
import pytorch_lightning as pl # type: ignore[import]
from ._data_sparstity_utils import (
_attach_model_to_data_sparsifier,
_log_sparsified_level,
_get_valid_name
)
class PostTrainingDataSp... | pytorch-master | torch/ao/sparsity/_experimental/data_sparsifier/lightning/callbacks/data_sparsity.py |
| pytorch-master | torch/ao/sparsity/_experimental/data_sparsifier/lightning/callbacks/__init__.py |
import logging
from torch.ao.sparsity._experimental.data_sparsifier.base_data_sparsifier import SUPPORTED_TYPES
logger: logging.Logger = logging.getLogger(__name__)
def _attach_model_to_data_sparsifier(module, data_sparsifier, config=None):
"""Attaches a data sparsifier to all the layers of the module.
Essen... | pytorch-master | torch/ao/sparsity/_experimental/data_sparsifier/lightning/callbacks/_data_sparstity_utils.py |
from torch.ao.sparsity._experimental.data_sparsifier.data_norm_sparsifier import DataNormSparsifier
from torch.ao.sparsity._experimental.data_scheduler.base_data_scheduler import BaseDataScheduler
import torch
import torch.nn as nn
from typing import List
from torch.ao.sparsity._experimental.data_sparsifier.lightning.c... | pytorch-master | torch/ao/sparsity/_experimental/data_sparsifier/lightning/tests/test_callbacks.py |
from typing import Dict, List
import torch
import time
from torch.ao.sparsity._experimental.data_sparsifier import DataNormSparsifier
import os
from dlrm_utils import get_dlrm_model, get_valid_name # type: ignore[import]
import copy
import zipfile
from zipfile import ZipFile
import pandas as pd # type: ignore[import]... | pytorch-master | torch/ao/sparsity/_experimental/data_sparsifier/benchmarks/evaluate_disk_savings.py |
from typing import Dict, List
import torch
from dlrm_s_pytorch import unpack_batch # type: ignore[import]
import numpy as np # type: ignore[import]
import time
from dlrm_utils import make_test_data_loader, fetch_model, dlrm_wrap # type: ignore[import]
import pandas as pd # type: ignore[import]
import argparse
def... | pytorch-master | torch/ao/sparsity/_experimental/data_sparsifier/benchmarks/evaluate_forward_time.py |
from typing import Dict, List
import torch
from dlrm_s_pytorch import unpack_batch # type: ignore[import]
import numpy as np # type: ignore[import]
import sklearn # type: ignore[import]
from dlrm_utils import make_test_data_loader, dlrm_wrap, fetch_model
import pandas as pd # type: ignore[import]
import argparse
... | pytorch-master | torch/ao/sparsity/_experimental/data_sparsifier/benchmarks/evaluate_model_metrics.py |
import torch
from dlrm_s_pytorch import DLRM_Net # type: ignore[import]
import numpy as np # type: ignore[import]
from dlrm_data_pytorch import CriteoDataset, collate_wrapper_criteo_offset # type: ignore[import]
import zipfile
import os
class SparseDLRM(DLRM_Net):
"""The SparseDLRM model is a wrapper around th... | pytorch-master | torch/ao/sparsity/_experimental/data_sparsifier/benchmarks/dlrm_utils.py |
from .base_pruner import BasePruner
from .parametrization import (
ActivationReconstruction,
BiasHook,
PruningParametrization,
ZeroesParametrization,
)
__all__ = [
"ActivationReconstruction",
"BasePruner",
"BiasHook",
"PruningParametrization",
"ZeroesParametrization",
]
| pytorch-master | torch/ao/sparsity/_experimental/pruner/__init__.py |
import copy
import warnings
import abc
import torch
from torch import nn
from torch.nn.utils import parametrize
from torch.nn.modules.container import ModuleDict, ModuleList
from .parametrization import PruningParametrization, ZeroesParametrization, ActivationReconstruction, BiasHook
from torch.ao.sparsity import ... | pytorch-master | torch/ao/sparsity/_experimental/pruner/base_pruner.py |
import torch
from torch import nn
from typing import Any, List
__all__ = ['PruningParametrization', 'ZeroesParametrization', 'ActivationReconstruction', 'BiasHook']
class PruningParametrization(nn.Module):
def __init__(self, original_outputs):
super().__init__()
self.original_outputs = set(range(o... | pytorch-master | torch/ao/sparsity/_experimental/pruner/parametrization.py |
from typing import Dict, Any, List
import torch
from collections import defaultdict
from torch import nn
import copy
from ...sparsifier.utils import fqn_to_module, module_to_fqn
import warnings
__all__ = ['ActivationSparsifier']
class ActivationSparsifier:
r"""
The Activation sparsifier class aims to sparsif... | pytorch-master | torch/ao/sparsity/_experimental/activation_sparsifier/activation_sparsifier.py |
| pytorch-master | torch/ao/sparsity/_experimental/activation_sparsifier/__init__.py |
from .base_data_scheduler import BaseDataScheduler
__all__ = [
"BaseDataScheduler",
]
| pytorch-master | torch/ao/sparsity/_experimental/data_scheduler/__init__.py |
from functools import wraps
import weakref
import abc
import warnings
from ..data_sparsifier import BaseDataSparsifier
__all__ = ['BaseDataScheduler']
class BaseDataScheduler(object):
r"""
The BaseDataScheduler is the abstract scheduler class specifically for the
BaseDataSparsifier class. This class con... | pytorch-master | torch/ao/sparsity/_experimental/data_scheduler/base_data_scheduler.py |
from torch.ao.sparsity import BaseSparsifier
from functools import wraps
import warnings
import weakref
__all__ = ["BaseScheduler"]
class BaseScheduler(object):
def __init__(self, sparsifier, last_epoch=-1, verbose=False):
# Attach sparsifier
if not isinstance(sparsifier, BaseSparsifier):
... | pytorch-master | torch/ao/sparsity/scheduler/base_scheduler.py |
| pytorch-master | torch/ao/sparsity/scheduler/__init__.py |
import warnings
from .base_scheduler import BaseScheduler
__all__ = ["LambdaSL"]
class LambdaSL(BaseScheduler):
"""Sets the sparsity level of each parameter group to the final sl
times a given function. When last_epoch=-1, sets initial sl as zero.
Args:
sparsifier (BaseSparsifier): Wrapped sparsi... | pytorch-master | torch/ao/sparsity/scheduler/lambda_scheduler.py |
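A hedged usage sketch for `LambdaSL`, combining it with the `WeightNormSparsifier` defined earlier in this package; the keyword name `sl_lambda` and the `prepare(model, config=None)` call are assumptions about the API, not confirmed by the truncated snippet.

```python
import torch.nn as nn
from torch.ao.sparsity import WeightNormSparsifier
from torch.ao.sparsity.scheduler.lambda_scheduler import LambdaSL

model = nn.Sequential(nn.Linear(8, 8))
sparsifier = WeightNormSparsifier(sparsity_level=0.8)
sparsifier.prepare(model, config=None)  # assumed: None sparsifies all supported layers

# Assumed keyword `sl_lambda`: scale the final sparsity level by 0.5 per epoch.
scheduler = LambdaSL(sparsifier, sl_lambda=lambda epoch: 0.5 ** epoch)

sparsifier.step()  # update masks at the current sparsity level
scheduler.step()   # advance the schedule for the next epoch
```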
from torch.ao.nn import sparse
| pytorch-master | torch/ao/nn/__init__.py |
from . import quantized
| pytorch-master | torch/ao/nn/sparse/__init__.py |
from typing import Optional
import torch
from torch.nn.quantized.modules.utils import _quantize_weight, hide_packed_params_repr
__all__ = ['LinearPackedParams', 'Linear']
# TODO (zaf): Inherit from `quantized.LinearPackedParams` (T83294430)
class LinearPackedParams(torch.nn.Module):
_version = 1
def __init_... | pytorch-master | torch/ao/nn/sparse/quantized/linear.py |
from torch.ao.nn.sparse.quantized import dynamic
from .linear import Linear
from .linear import LinearPackedParams
__all__ = [
"dynamic",
"Linear",
"LinearPackedParams",
]
| pytorch-master | torch/ao/nn/sparse/quantized/__init__.py |
import threading
def is_valid_linear_block_sparse_pattern(row_block_size, col_block_size):
return (row_block_size == 1 and col_block_size == 4) or \
(row_block_size == 8 and col_block_size == 1)
# This is a stop-gap measure as current flow does not allow module
# specific block sparse pattern.
# Infact... | pytorch-master | torch/ao/nn/sparse/quantized/utils.py |
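A quick check of the predicate above: only the 1x4 and 8x1 block patterns are accepted.

```python
def is_valid_linear_block_sparse_pattern(row_block_size, col_block_size):
    return (row_block_size == 1 and col_block_size == 4) or \
        (row_block_size == 8 and col_block_size == 1)

assert is_valid_linear_block_sparse_pattern(1, 4)
assert is_valid_linear_block_sparse_pattern(8, 1)
assert not is_valid_linear_block_sparse_pattern(4, 4)
```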
from typing import Optional
from torch.ao.nn.sparse.quantized import linear
from torch.ao.nn.sparse.quantized.utils import LinearBlockSparsePattern
import torch
import torch.nn.intrinsic as nni
from torch.nn.quantized.modules.utils import _quantize_weight, hide_packed_params_repr
__all__ = ['Linear']
class Linear(t... | pytorch-master | torch/ao/nn/sparse/quantized/dynamic/linear.py |
from .linear import Linear
__all__ = [
"Linear",
]
| pytorch-master | torch/ao/nn/sparse/quantized/dynamic/__init__.py |
"""
Numeric Suite Core APIs for define-by-run quantization.
Experimental, API may change at any time.
"""
import functools
from typing import Tuple, Any, Optional, List, Dict
import torch
from torch.ao.quantization._dbr.quantization_state import (
AutoQuantizationState,
)
def _turn_on_loggers(name: str, model:... | pytorch-master | torch/ao/ns/_numeric_suite_dbr.py |
import torch
import torch.nn as nn
import torch.nn.quantized as nnq
import torch.nn.quantized.dynamic as nnqd
from torch.ao.quantization import prepare
from typing import Dict, List, Optional, Any, Union, Callable, Set
from torch.ao.quantization.quantization_mappings import (
get_default_compare_output_module_list... | pytorch-master | torch/ao/ns/_numeric_suite.py |
| pytorch-master | torch/ao/ns/__init__.py |
"""
This module contains tooling to compare weights and activations
across models. Example usage::
import copy
import torch
import torch.quantization.quantize_fx as quantize_fx
import torch.ao.ns._numeric_suite_fx as ns
m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1)).eval()
mp = quantize_fx.... | pytorch-master | torch/ao/ns/_numeric_suite_fx.py |
import torch
from torch.fx import GraphModule, map_arg
from torch.fx.graph import Graph, Node
from torch.ao.quantization.fx.utils import get_new_attr_name_with_prefix
from .utils import (
get_node_first_input_and_output_type,
getattr_from_fqn,
NodeInputOrOutputType,
return_first_non_observer_node,
... | pytorch-master | torch/ao/ns/fx/graph_passes.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.quantized.dynamic as nnqd
import torch.nn.quantized as nnq
import torch.nn.intrinsic.qat as nniqat
import torch.nn.qat as nnqat
import torch.nn.intrinsic as nni
import torch.nn.intrinsic.quantized as nniq
toq = torch.ops.quantized
from t... | pytorch-master | torch/ao/ns/fx/weight_utils.py |
import collections
import enum
import torch
toq = torch.ops.quantized
from torch.fx import GraphModule
from torch.fx.graph import Graph, Node
from torch.ao.quantization.utils import getattr_from_fqn
from .ns_types import NSSubgraph, NSNodeTargetType
from .mappings import (
get_base_name_to_sets_of_related_ops,
... | pytorch-master | torch/ao/ns/fx/graph_matcher.py |
| pytorch-master | torch/ao/ns/fx/__init__.py |
import enum
import operator
import torch
import torch.nn as nn
import torch.nn.intrinsic.quantized as nniq
import torch.nn.quantized as nnq
toq = torch.ops.quantized
from typing import Tuple, Callable, Dict, Set, List, Optional, Union
from torch.fx import GraphModule
from torch.fx.graph import Node
from torch.ao.qua... | pytorch-master | torch/ao/ns/fx/utils.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
toq = torch.ops.quantized
from torch.fx import GraphModule
from torch.fx.graph import Node
from torch.ao.quantization.utils import getattr_from_fqn
from .ns_types import NSNodeTargetType
from torch.ao.quantization.fx.backend_config_utils import get_na... | pytorch-master | torch/ao/ns/fx/pattern_utils.py |
import enum
from typing import NamedTuple
from torch.fx.graph import Node
from typing import Dict, Any, List, Union, Callable
class NSSingleResultValuesType(str, enum.Enum):
WEIGHT = 'weight'
NODE_OUTPUT = 'node_output'
NODE_INPUT = 'node_input'
NSSubgraph = NamedTuple(
'NSSubgraph',
[('start_no... | pytorch-master | torch/ao/ns/fx/ns_types.py |
import operator
import torch
import torch.nn as nn
import torch.nn.functional as F
toq = torch.ops.quantized
import torch.nn.quantized as nnq
import torch.nn.quantized.dynamic as nnqd
import torch.nn.intrinsic.quantized as nniq
import torch.nn.intrinsic.quantized.dynamic as nniqd
import torch.nn.intrinsic.qat as nniq... | pytorch-master | torch/ao/ns/fx/mappings.py |
"""
This module implements observers which are used to collect statistics about
the values observed during calibration (PTQ) or training (QAT).
"""
import re
import warnings
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from functools import partial
from typing import Any, List, Tuple, Op... | pytorch-master | torch/ao/quantization/observer.py |
import copy
import torch.nn as nn
from torch.ao.quantization.fuser_method_mappings import get_fuser_method
# for backward compatibility
from torch.ao.quantization.fuser_method_mappings import fuse_conv_bn # noqa: F401
from torch.ao.quantization.fuser_method_mappings import fuse_conv_bn_relu # noqa: F401
from torch.... | pytorch-master | torch/ao/quantization/fuse_modules.py |
import torch
from torch.nn.parameter import Parameter
class _LearnableFakeQuantize(torch.ao.quantization.FakeQuantizeBase):
r""" This is an extension of the FakeQuantize module in fake_quantize.py, which
    supports more generalized lower-bit quantization and supports learning of the scale
and zero point para... | pytorch-master | torch/ao/quantization/_learnable_fake_quantize.py |
import copy
import torch
from torch import nn
import torch.nn.functional as F
import torch.nn.intrinsic as nni
import torch.nn.intrinsic.quantized as nniq
import torch.nn.intrinsic.quantized.dynamic as nniqd
import torch.nn.intrinsic.qat as nniqat
import torch.nn.quantized as nnq
import torch.nn.quantized._reference ... | pytorch-master | torch/ao/quantization/quantization_mappings.py |
import torch
import torch.nn as nn
import torch.nn.quantized as nnq
import torch.ao.quantization
import torch.ao.ns._numeric_suite as ns
_supported_modules = {nn.Linear, nn.Conv2d}
_supported_modules_quantized = {nnq.Linear, nnq.Conv2d}
def get_module(model, name):
''' Given name of submodule, this function grab... | pytorch-master | torch/ao/quantization/_correct_bias.py |
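`get_module` is cut off above; the following is only a generic sketch of the usual dotted-name lookup pattern, with a hypothetical helper name, and is not necessarily this file's exact implementation.

```python
import torch.nn as nn

def get_submodule_by_name(model: nn.Module, name: str):
    # Hypothetical helper: walk the dotted path, returning None if any hop is missing.
    module = model
    for part in name.split("."):
        module = getattr(module, part, None)
        if module is None:
            return None
    return module

model = nn.Sequential(nn.Linear(4, 4), nn.Sequential(nn.Conv2d(1, 1, 1)))
print(get_submodule_by_name(model, "1.0"))  # the inner Conv2d
```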
import copy
import itertools
import warnings
import torch
import torch.nn as nn
import torch.nn.quantized as nnq
from torch.nn.intrinsic import _FusedModule
from torch.ao.quantization.quantization_mappings import (
get_default_dynamic_quant_module_mappings,
get_default_static_quant_module_mappings,
get_de... | pytorch-master | torch/ao/quantization/quantize.py |
import torch
import copy
from typing import Dict, Any
_supported_types = {torch.nn.Conv2d, torch.nn.Linear}
_supported_intrinsic_types = {torch.nn.intrinsic.ConvReLU2d, torch.nn.intrinsic.LinearReLU}
_all_supported_types = _supported_types.union(_supported_intrinsic_types)
def set_module_weight(module, weight) -> Non... | pytorch-master | torch/ao/quantization/_equalize.py |
# TODO: the name of this file is probably confusing, remove this file and move the type
# definitions to somewhere else, e.g. to .utils
from typing import Any, Tuple, Union
from torch.fx import Node
from .utils import Pattern # noqa: F401
NodePattern = Union[Tuple[Node, Node], Tuple[Node, Tuple[Node, Node]], Any]
# ... | pytorch-master | torch/ao/quantization/quantization_types.py |
"""
This module implements modules which are used to perform fake quantization
during QAT.
"""
import torch
from torch.nn import Module
from torch.ao.quantization.observer import (
MovingAverageMinMaxObserver,
HistogramObserver,
MovingAveragePerChannelMinMaxObserver,
FixedQParamsObserver,
default_f... | pytorch-master | torch/ao/quantization/fake_quantize.py |
from collections import namedtuple
from typing import Optional, Any, Union
import torch
import torch.nn as nn
from torch.ao.quantization.fake_quantize import (
FakeQuantize,
FakeQuantizeBase,
default_fake_quant,
default_dynamic_fake_quant,
default_per_channel_weight_fake_quant,
default_weight_f... | pytorch-master | torch/ao/quantization/qconfig.py |
# flake8: noqa: F403
from .fake_quantize import * # noqa: F403
from .fuse_modules import fuse_modules # noqa: F403
from .fuse_modules import fuse_modules_qat # noqa: F403
from .fuser_method_mappings import * # noqa: F403
from .observer import * # noqa: F403
from .qconfig import * # noqa: F403
from .qconfig_mappi... | pytorch-master | torch/ao/quantization/__init__.py |
from __future__ import annotations
from collections import OrderedDict
from typing import Any, Callable, Dict, Tuple, Union
import torch
from .fake_quantize import (
default_weight_fake_quant,
FixedQParamsFakeQuantize,
)
from .observer import (
_PartialWrapper,
default_fixed_qparams_range_0to1_observe... | pytorch-master | torch/ao/quantization/qconfig_mapping.py |
from torch import nn
class QuantStub(nn.Module):
r"""Quantize stub module, before calibration, this is same as an observer,
it will be swapped as `nnq.Quantize` in `convert`.
Args:
qconfig: quantization configuration for the tensor,
if qconfig is not provided, we will get qconfig from... | pytorch-master | torch/ao/quantization/stubs.py |
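The `QuantStub` docstring above describes the standard eager-mode flow; below is a compact sketch of that flow with default qconfigs (the observer and backend details are assumptions).

```python
import torch
import torch.nn as nn
from torch.ao.quantization import (
    QuantStub, DeQuantStub, get_default_qconfig, prepare, convert,
)

class M(nn.Module):
    def __init__(self):
        super().__init__()
        self.quant = QuantStub()      # acts as an observer; becomes nnq.Quantize in convert
        self.fc = nn.Linear(4, 4)
        self.dequant = DeQuantStub()  # becomes nnq.DeQuantize in convert

    def forward(self, x):
        return self.dequant(self.fc(self.quant(x)))

m = M().eval()
m.qconfig = get_default_qconfig("fbgemm")
prepare(m, inplace=True)     # insert observers
m(torch.randn(2, 4))         # calibration pass collects statistics
convert(m, inplace=True)     # swap observed modules for quantized ones
```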
"""
Utils shared by different modes of quantization (eager/graph)
"""
import warnings
import functools
import torch
from torch.ao.quantization.quant_type import QuantType
from typing import Tuple, Any, Union, Callable, Dict, Optional
from torch.nn.utils.parametrize import is_parametrized
from collections import Ordered... | pytorch-master | torch/ao/quantization/utils.py |
import torch.nn as nn
import torch.nn.intrinsic as nni
from typing import Union, Callable, Tuple, Dict, Optional, Type
from torch.ao.quantization.utils import Pattern
from torch.ao.quantization.utils import get_combined_dict
from torch.ao.quantization.utils import MatchAllNode
import itertools
def fuse_conv_bn(is_qa... | pytorch-master | torch/ao/quantization/fuser_method_mappings.py |
import torch
from torch.ao.quantization.qconfig import QConfig
from torch.ao.quantization.quant_type import QuantType
from torch.jit._recursive import wrap_cpp_module
def _check_is_script_module(model):
if not isinstance(model, torch.jit.ScriptModule):
raise ValueError('input must be a script module, got:... | pytorch-master | torch/ao/quantization/quantize_jit.py |
import torch
from ._dbr.auto_trace import add_auto_observation, add_auto_convert
from ._dbr.fusion import get_module_fusion_fqns
from ._dbr.qconfig_mapping_utils import normalize_object_types
from .qconfig_mapping_utils import (
get_flattened_qconfig_dict,
)
from torch.ao.quantization.qconfig_mapping import QConf... | pytorch-master | torch/ao/quantization/_quantize_dbr.py |
import enum
__all__ = [
"QuantType",
"quant_type_to_str",
]
# Quantization type (dynamic quantization, static quantization).
# Should match the c++ enum in quantization_type.h
class QuantType(enum.IntEnum):
DYNAMIC = 0
STATIC = 1
QAT = 2
WEIGHT_ONLY = 3
_quant_type_to_str = {
QuantType.ST... | pytorch-master | torch/ao/quantization/quant_type.py |
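A tiny sketch of the enum above in use; the lowercase string values are inferred from the truncated `_quant_type_to_str` mapping and should be treated as an assumption.

```python
from torch.ao.quantization.quant_type import QuantType, quant_type_to_str

# Integer values match the C++ enum noted in the comment above.
assert QuantType.DYNAMIC == 0 and QuantType.QAT == 2

# Assumed: maps each member to its lowercase name, e.g. "static".
print(quant_type_to_str(QuantType.STATIC))
```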
from typing import Any, Dict, Optional, Set, Tuple, Union
import warnings
import torch
from torch.fx import GraphModule
from .fx.tracer import QuantizationTracer
from .fx import fuse # noqa: F401
from .fx import prepare # noqa: F401
from .fx.convert import convert
from .backend_config import ( # noqa: F401
Back... | pytorch-master | torch/ao/quantization/quantize_fx.py |
import re
from typing import Dict, Callable, Union
from .utils import (
get_combined_dict,
_parent_name,
)
from .quantization_mappings import (
get_default_qat_module_mappings,
)
from .qconfig import QConfigAny
from .qconfig_mapping import QConfigMapping
# TODO: revisit this list. Many helper methods sho... | pytorch-master | torch/ao/quantization/qconfig_mapping_utils.py |
"""
This module implements nonuniform observers used to collect statistics about
the values observed during calibration (PTQ) or training (QAT).
"""
import torch
import itertools
import matplotlib.pyplot as plt
from torch.ao.quantization.observer import ObserverBase
from torch.ao.quantization.experimental.apot_utils i... | pytorch-master | torch/ao/quantization/experimental/observer.py |
r"""
This file contains utility functions to convert values
using APoT nonuniform quantization methods.
"""
import math
r"""Converts floating point input into APoT number
based on quantization levels
"""
def float_to_apot(x, levels, indices, alpha):
# clip values based on alpha
if x < -alpha:
retu... | pytorch-master | torch/ao/quantization/experimental/apot_utils.py |
import torch
from torch import Tensor
from torch.ao.quantization.experimental.quantizer import quantize_APoT, dequantize_APoT
class fake_quantize_function(torch.autograd.Function):
@staticmethod
def forward(ctx, # type: ignore[override]
x: Tensor,
alpha: Tensor,
... | pytorch-master | torch/ao/quantization/experimental/fake_quantize_function.py |
import torch
from torch.ao.quantization.experimental.quantizer import APoTQuantizer
# class to store APoT quantized tensor
class TensorAPoT():
quantizer: APoTQuantizer
data: torch.Tensor
def __init__(self, quantizer: APoTQuantizer, apot_data: torch.Tensor):
self.quantizer = quantizer
self.... | pytorch-master | torch/ao/quantization/experimental/APoT_tensor.py |
import torch
import numpy as np
from torch.nn.quantized.modules.utils import WeightedQuantizedModule
from torch.ao.quantization.experimental.observer import APoTObserver
from torch.ao.quantization.experimental.quantizer import quantize_APoT
class LinearAPoT(WeightedQuantizedModule):
r"""
A quantized linear mo... | pytorch-master | torch/ao/quantization/experimental/linear.py |
import torch
from torch import Tensor
from torch.ao.quantization.experimental.observer import APoTObserver
from torch.ao.quantization.fake_quantize import FakeQuantizeBase
from torch.ao.quantization.experimental.fake_quantize_function import fake_quantize_function
class APoTFakeQuantize(FakeQuantizeBase):
alpha: T... | pytorch-master | torch/ao/quantization/experimental/fake_quantize.py |
import torch
from torch.ao.quantization.qconfig import QConfig
from torch.ao.quantization import MinMaxObserver
from torch.ao.quantization.fake_quantize import FakeQuantize
from torch.ao.quantization.experimental.fake_quantize import APoTFakeQuantize
"""
Default symmetric fake_quant for activations.
"""
default_symmet... | pytorch-master | torch/ao/quantization/experimental/qconfig.py |
import torch
from torch import Tensor
import numpy as np
from torch.ao.quantization.experimental.apot_utils import float_to_apot, apot_to_float, quant_dequant_util
# class to store APoT quantizer and
# implement quantize and dequantize
class APoTQuantizer():
alpha: torch.Tensor
gamma: torch.Tensor
quantiza... | pytorch-master | torch/ao/quantization/experimental/quantizer.py |
import operator
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.nn.intrinsic as nni
import torch.nn.intrinsic.qat as nniqat
import torch.nn.qat as nnqat
import torch.nn.quantized._reference as nnqr
from collections import namedtuple
from typing import List
from .observation_type import O... | pytorch-master | torch/ao/quantization/backend_config/_common_operator_config_utils.py |
from .backend_config import BackendConfig, BackendPatternConfig, DTypeConfig
from .native import get_native_backend_config, get_native_backend_config_dict
from .observation_type import ObservationType
from .tensorrt import get_tensorrt_backend_config, get_tensorrt_backend_config_dict
__all__ = [
"get_native_backen... | pytorch-master | torch/ao/quantization/backend_config/__init__.py |
from typing import List
import torch
import torch.nn as nn
import torch.nn.intrinsic as nni
import torch.nn.qat as nnqat
import torch.nn.quantized._reference as nnqr
from ._common_operator_config_utils import (
_get_binary_op_configs,
_get_linear_configs,
_get_conv_configs,
_get_share_qparams_op_configs... | pytorch-master | torch/ao/quantization/backend_config/native.py |
from enum import Enum
__all__ = ['ObservationType']
class ObservationType(Enum):
# this means input and output are observed with different observers, based
# on qconfig.activation
# example: conv, linear, softmax
OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT = 0
# this means the output will use the same ... | pytorch-master | torch/ao/quantization/backend_config/observation_type.py |
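A hedged sketch of where `ObservationType` plugs in when assembling a backend pattern config; `BackendPatternConfig`, `DTypeConfig`, and `ObservationType` are all exported by this package, but the exact method signatures used here should be treated as assumptions.

```python
import torch
from torch.ao.quantization.backend_config import (
    BackendPatternConfig, DTypeConfig, ObservationType,
)

weighted_int8_dtype_config = DTypeConfig(
    input_dtype=torch.quint8,
    output_dtype=torch.quint8,
    weight_dtype=torch.qint8,
    bias_dtype=torch.float,
)

# Per the comment above, conv/linear/softmax observe input and output
# with different observers, based on qconfig.activation.
linear_config = (
    BackendPatternConfig(torch.nn.Linear)
    .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT)
    .add_dtype_config(weighted_int8_dtype_config)
)
```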
from typing import Dict, Any, List, Callable, Union, Tuple, Type
import torch
import torch.nn as nn
import torch.nn.functional as F
from .backend_config import BackendConfig, DTypeConfig
from ..quantization_types import Pattern
def get_pattern_to_dtype_configs(backend_config: BackendConfig) -> Dict[Pattern, List[DTyp... | pytorch-master | torch/ao/quantization/backend_config/utils.py |
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Type
import torch
from torch.ao.quantization.backend_config.observation_type import ObservationType
from torch.ao.quantization.observer import _PartialWrapper
from torch.ao.quantization.utils im... | pytorch-master | torch/ao/quantization/backend_config/backend_config.py |
import torch
from .backend_config import BackendConfig, BackendPatternConfig, DTypeConfig
from .observation_type import ObservationType
from ._common_operator_config_utils import (
_get_binary_op_configs,
_get_linear_configs,
_get_conv_configs,
_get_share_qparams_op_configs,
)
def get_tensorrt_backend_... | pytorch-master | torch/ao/quantization/backend_config/tensorrt.py |
import torch
import copy
from torch.fx import GraphModule
from torch.fx.graph import Graph
from typing import Union, Dict, Any, Set
class FusedGraphModule(GraphModule):
def __init__(self, root: Union[torch.nn.Module, Dict[str, Any]], graph: Graph, preserved_attr_names: Set[str]):
self.preserved_attr_names ... | pytorch-master | torch/ao/quantization/fx/graph_module.py |
import torch
from torch.fx.graph import Node, Graph
from ..utils import _parent_name
from torch.ao.quantization.quantization_types import NodePattern, Pattern
from ..fuser_method_mappings import get_fuser_method_new
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, Optional, Union, List
from .... | pytorch-master | torch/ao/quantization/fx/fusion_patterns.py |
import torch
from collections import defaultdict, OrderedDict
from typing import Callable, Any, Dict, Tuple, Set, List
from torch.ao.quantization import QConfig
from torch.ao.quantization.qconfig import add_module_to_qconfig_obs_ctr, QConfigAny, qconfig_equals
from torch.ao.quantization.quantize import (
is_activat... | pytorch-master | torch/ao/quantization/fx/qconfig_utils.py |
from ._lower_to_native_backend import _lower_to_native_backend
from .graph_module import QuantizedGraphModule
from ..qconfig import QConfigAny
from typing import Dict, Tuple
__all__ = ['lower_to_fbgemm']
def lower_to_fbgemm(
model: QuantizedGraphModule,
qconfig_map: Dict[str, QConfigAny],
node_name_to_sco... | pytorch-master | torch/ao/quantization/fx/lower_to_fbgemm.py |
import warnings
from collections import namedtuple
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.intrinsic as nni
from torch.fx import GraphModule
from torch.fx.graph import Node
from ..observer import _with_args, ObserverBase, ... | pytorch-master | torch/ao/quantization/fx/_equalize.py |
from ._lower_to_native_backend import _lower_to_native_backend
from .graph_module import QuantizedGraphModule
from ..qconfig import QConfigAny
from typing import Dict, Tuple
def lower_to_qnnpack(
model: QuantizedGraphModule,
qconfig_map: Dict[str, QConfigAny],
node_name_to_scope: Dict[str, Tuple[str, type]... | pytorch-master | torch/ao/quantization/fx/lower_to_qnnpack.py |
from typing import Any, Dict, List, Optional, Set, Tuple, Union, Type
from torch.ao.quantization.quant_type import QuantType
import torch
import copy
import warnings
from torch.fx import (
GraphModule,
)
from torch.fx.graph import (
Graph,
Node,
Argument,
)
from ..utils import (
activation_is_static... | pytorch-master | torch/ao/quantization/fx/convert.py |