Columns: python_code (stringlengths 0–1.02M) | repo_name (stringlengths 9–48) | file_path (stringlengths 5–114)
pytorch-master
benchmarks/instruction_counts/execution/__init__.py
"""Handle the details of subprocess calls and retries for a given benchmark run.""" import dataclasses import json import os import pickle import signal import subprocess import time from typing import List, Optional, Union, TYPE_CHECKING import uuid from core.api import AutoLabels from core.types import Label from co...
pytorch-master
benchmarks/instruction_counts/execution/work.py
"""Collect instruction counts for continuous integration.""" import argparse import hashlib import json import time from typing import Dict, List, Union from core.expand import materialize from definitions.standard import BENCHMARKS from execution.runner import Runner from execution.work import WorkOrder REPEATS = 5...
pytorch-master
benchmarks/instruction_counts/applications/ci.py
pytorch-master
benchmarks/instruction_counts/applications/__init__.py
pytorch-master
benchmarks/instruction_counts/worker/__init__.py
"""File invoked through subprocess to actually carry out measurements. `worker/main.py` is deliberately isolated from the rest of the benchmark infrastructure. Other parts of the benchmark rely on this file, but `worker/` has only one Python file and does not import ANYTHING from the rest of the benchmark suite. The r...
pytorch-master
benchmarks/instruction_counts/worker/main.py
"""Default set of benchmarks. Parser notes: `parse_stmts`: - Width for the left (Python) column MUST be 40 characters. - The column separator is " | ", not "|". Whitespace matters. `GroupedVariants`: - `Setup` and `Global_Setup` (case insensitive) are reserved keywords to pop...
pytorch-master
benchmarks/instruction_counts/definitions/standard.py
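The parser notes in the standard.py row above fix two mechanical details of `parse_stmts` input: the left (Python) column must be exactly 40 characters wide, and the column separator is " | " with the surrounding whitespace included. A minimal sketch of a conforming dual-column block follows; the header and dashed-rule rows are assumptions about the suite's convention beyond what the docstring states, and the return value is assumed to be a (python, cpp) pair of strings. The import path matches the definitions/setup.py row below.

from core.utils import parse_stmts

stmts = r"""
Python                                   | C++
---------------------------------------- | ----------------------------------------
x = torch.ones((4, 4))                   | auto x = torch::ones({4, 4});
"""

# Assumed signature: splits the block into its Python and C++ columns.
py_stmt, cpp_stmt = parse_stmts(stmts)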
pytorch-master
benchmarks/instruction_counts/definitions/__init__.py
"""Define some common setup blocks which benchmarks can reuse.""" import enum from core.api import GroupedSetup from core.utils import parse_stmts _TRIVIAL_2D = GroupedSetup( r"x = torch.ones((4, 4))", r"auto x = torch::ones({4, 4});" ) _TRIVIAL_3D = GroupedSetup( r"x = torch.ones((4, 4, 4))", r"a...
pytorch-master
benchmarks/instruction_counts/definitions/setup.py
# Taken from https://github.com/pytorch/audio/blob/master/torchaudio/models/wav2letter.py # So that we don't need torchaudio to be installed import torch from torch import Tensor from torch import nn import torch.nn.functional as F import math from collections import OrderedDict from typing import Tuple, Optional __...
pytorch-master
benchmarks/functional_autograd_benchmark/torchaudio_models.py
import torch from torch import Tensor import torchvision_models as models from utils import check_for_functorch, extract_weights, load_weights, GetterReturnType from typing import cast has_functorch = check_for_functorch() def get_resnet18(device: torch.device) -> GetterReturnType: N = 32 model = models.re...
pytorch-master
benchmarks/functional_autograd_benchmark/vision_models.py
import torch from torch.autograd import functional import time from argparse import ArgumentParser from collections import defaultdict from typing import NamedTuple, Callable, List, Any try: import functorch as ft has_functorch = True print(f"Found functorch: {ft.__version__}") except ImportError: has...
pytorch-master
benchmarks/functional_autograd_benchmark/functional_autograd_benchmark.py
# Taken from https://github.com/pytorch/vision # So that we don't need torchvision to be installed import torch from torch import nn from torch.nn import functional as F from torch.jit.annotations import Dict from collections import OrderedDict try: from scipy.optimize import linear_sum_assignment scipy_avail...
pytorch-master
benchmarks/functional_autograd_benchmark/torchvision_models.py
import torch from collections import defaultdict from torch import nn, Tensor from typing import List, Tuple, Dict, Union, Callable # Type helpers InputsType = Union[Tensor, Tuple[Tensor, ...]] # A Getter takes in a device and returns a callable and the inputs to that callable GetterReturnType = Tuple[Callable[..., ...
pytorch-master
benchmarks/functional_autograd_benchmark/utils.py
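The type helpers in the utils.py row above encode the suite's central contract: a getter takes a device and returns the callable under test plus the inputs for that callable. A hypothetical getter honoring that contract; the GetterReturnType alias is truncated above, so its completion here is an assumption:

import torch
from torch import Tensor
from typing import Callable, Tuple, Union

InputsType = Union[Tensor, Tuple[Tensor, ...]]
# Assumed completion of the truncated alias: a (callable, inputs) pair.
GetterReturnType = Tuple[Callable[..., Tensor], InputsType]

def get_toy_model(device: torch.device) -> GetterReturnType:
    # Hypothetical model: a single linear map reduced to a scalar loss.
    weight = torch.randn(3, 3, device=device)

    def forward(x: Tensor) -> Tensor:
        return (x @ weight).sum()

    inputs = torch.randn(5, 3, device=device)
    return forward, inputs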
import torch from torch import Tensor import torch.distributions as dist from utils import GetterReturnType def get_simple_regression(device: torch.device) -> GetterReturnType: N = 10 K = 10 loc_beta = 0. scale_beta = 1. beta_prior = dist.Normal(loc_beta, scale_beta) X = torch.rand(N, K + 1...
pytorch-master
benchmarks/functional_autograd_benchmark/ppl_models.py
import torch from torch import nn, Tensor import torchaudio_models as models from utils import check_for_functorch, extract_weights, load_weights, GetterReturnType has_functorch = check_for_functorch() def get_wav2letter(device: torch.device) -> GetterReturnType: N = 10 input_frames = 700 vocab_size =...
pytorch-master
benchmarks/functional_autograd_benchmark/audio_text_models.py
import argparse from collections import defaultdict from utils import to_markdown_table, from_markdown_table def main(): parser = argparse.ArgumentParser("Main script to compare results from the benchmarks") parser.add_argument("--before", type=str, default="before.txt", help="Text file containing the times t...
pytorch-master
benchmarks/functional_autograd_benchmark/compare.py
from benchmark_core import _register_test from benchmark_pytorch import create_pytorch_op_test_case def generate_pt_test(configs, pt_bench_op): """ This function creates PyTorch op test based on the given operator """ _register_test(configs, pt_bench_op, create_pytorch_op_test_case, False) def generate_...
pytorch-master
benchmarks/operator_benchmark/benchmark_test_generator.py
from caffe2.python import workspace from caffe2.python import core from caffe2.proto import caffe2_pb2 import benchmark_utils from collections import namedtuple from benchmark_test_generator import _register_test """Caffe2 performance microbenchmarks. This module contains Caffe2-specific functionalities for performan...
pytorch-master
benchmarks/operator_benchmark/benchmark_caffe2.py
import time import json import torch import benchmark_cpp_extension # noqa: F401 """PyTorch performance microbenchmarks. This module contains PyTorch-specific functionalities for performance microbenchmarks. """ class TorchBenchmarkBase(torch.nn.Module): """ This is a base class used to create PyTorch operator...
pytorch-master
benchmarks/operator_benchmark/benchmark_pytorch.py
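The rows in this dump repeat one registration pattern around TorchBenchmarkBase: a config grid, a subclass whose init() builds self.inputs and whose forward() is the timed body, then generate_pt_test plus benchmark_runner.main(). A minimal end-to-end sketch assembled from the snippets shown here (shapes and names are illustrative):

import operator_benchmark as op_bench
import torch

# Config grid: one (M, N, K) point, tagged "short" (pattern from the config_list rows).
add_short_configs = op_bench.config_list(
    attr_names=["M", "N", "K"],
    attrs=[[8, 16, 32]],
    tags=["short"],
)

class AddBenchmark(op_bench.TorchBenchmarkBase):
    def init(self, M, N, K):
        # Inputs are built once here; only forward() is measured.
        self.inputs = {
            "input_one": torch.rand(M, N, K),
            "input_two": torch.rand(M, N, K),
        }
        self.set_module_name("add")

    def forward(self, input_one, input_two):
        return torch.add(input_one, input_two)

op_bench.generate_pt_test(add_short_configs, AddBenchmark)

if __name__ == "__main__":
    op_bench.benchmark_runner.main()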
import operator_benchmark as op_bench
from pt import (  # noqa: F401
    unary_test,
)
import benchmark_all_other_test  # noqa: F401
import benchmark_all_quantized_test  # noqa: F401

if __name__ == "__main__":
    op_bench.benchmark_runner.main()
pytorch-master
benchmarks/operator_benchmark/benchmark_all_test.py
# TODO (mingzhe09088): get rid of noqa
import benchmark_runner  # noqa: F401
from benchmark_pytorch import TorchBenchmarkBase  # noqa: F401
from benchmark_test_generator import *  # noqa: F401,F403
from benchmark_utils import *  # noqa: F401,F403
pytorch-master
benchmarks/operator_benchmark/operator_benchmark.py
pytorch-master
benchmarks/operator_benchmark/__init__.py
import operator_benchmark as op_bench from pt import ( # noqa: F401 add_test, as_strided_test, batchnorm_test, binary_test, cat_test, channel_shuffle_test, chunk_test, conv_test, diag_test, embeddingbag_test, fill_test, gather_test, linear_test, matmul_test, nan_to_num_test, pool_test, softmax_test, ha...
pytorch-master
benchmarks/operator_benchmark/benchmark_all_other_test.py
import argparse import torch import benchmark_core import benchmark_utils """Performance microbenchmarks' main binary. This is the main function for running performance microbenchmark tests. It also registers existing benchmark tests via Python module imports. """ parser = argparse.ArgumentParser( description=...
pytorch-master
benchmarks/operator_benchmark/benchmark_runner.py
import numpy as np import itertools import random import os import bisect """Performance microbenchmarks' utils. This module contains utilities for writing microbenchmark tests. """ # Here are the reserved keywords in the benchmark suite _reserved_keywords = {"probs", "total_samples", "tags"} _supported_devices = ...
pytorch-master
benchmarks/operator_benchmark/benchmark_utils.py
import functools import numpy as np import timeit import json import torch import copy import ast # needs to be imported after torch import torch.utils.cpp_extension as cpp_extension # noqa: F401 import benchmark_utils from collections import namedtuple """Performance microbenchmarks. This module contains core fun...
pytorch-master
benchmarks/operator_benchmark/benchmark_core.py
import operator_benchmark as op_bench from pt import ( # noqa: F401 qactivation_test, qarithmetic_test, qbatchnorm_test, qcat_test, qcomparators_test, qconv_test, qgroupnorm_test, qinstancenorm_test, qinterpolate_test, qlayernorm_test, qlinear_test, qobserver_test, q...
pytorch-master
benchmarks/operator_benchmark/benchmark_all_quantized_test.py
from setuptools import setup
from torch.utils.cpp_extension import CppExtension, BuildExtension

setup(name='benchmark_cpp_extension',
      ext_modules=[CppExtension('benchmark_cpp_extension', ['extension.cpp'])],
      cmdclass={'build_ext': BuildExtension})
pytorch-master
benchmarks/operator_benchmark/pt_extension/setup.py
import unittest import benchmark_cpp_extension # noqa: F401 import torch class TestConsumeOp(unittest.TestCase): def test_jit_consume_op(self): iters = 6 def foo(x): for i in range(iters): result = torch.ops.operator_benchmark._consume(torch.sum(x)) retur...
pytorch-master
benchmarks/operator_benchmark/pt_extension/cpp_extension_test.py
pytorch-master
benchmarks/operator_benchmark/common/__init__.py
import numpy as np import torch import time """Microbenchmarks for Tensor repeat operator. Supports PyTorch.""" input_shapes = ( (4, 4, 1), (16, 1, 32), (64, 64, 1, 1), (8, 256, 128), (1, 64, 128, 32), (512, 512), ) repeats = ...
pytorch-master
benchmarks/operator_benchmark/common/repeat_benchmark.py
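For readers scanning the repeat benchmark above: Tensor.repeat tiles a tensor by the given per-dimension counts. A quick illustration with one of the listed input shapes; the repeat counts here are hypothetical, since the file's repeats tuple is truncated above:

import torch

x = torch.ones(8, 256, 128)   # an input_shapes entry from the row above
y = x.repeat(4, 4, 1)         # hypothetical counts: tile dims 0 and 1 by 4
print(y.shape)                # torch.Size([32, 1024, 128])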
import operator_benchmark as op_bench import torch intraop_bench_configs = op_bench.config_list( attrs=[ [8, 16], ], attr_names=["M", "N"], tags=["short"], ) @torch.jit.script def torch_sumall(a, iterations): # type: (Tensor, int) result = 0.0 for _ in range(iterations): re...
pytorch-master
benchmarks/operator_benchmark/common/tests/jit_forward_test.py
import operator_benchmark as op_bench import torch """Microbenchmarks for element-wise Add operator. Supports both Caffe2/PyTorch.""" add_short_configs = op_bench.config_list( attr_names=['M', 'N', 'K'], attrs=[ [8, 16, 32], [16, 16, 64], [64, 64, 128], ], cross_product_configs...
pytorch-master
benchmarks/operator_benchmark/common/tests/pt_configs_list_test.py
import operator_benchmark as op_bench import torch configs = op_bench.random_sample_configs( M=[1, 2, 3, 4, 5, 6], N=[7, 8, 9, 10, 11, 12], K=[13, 14, 15, 16, 17, 18], # probs saves the weights of each value probs=op_bench.attr_probs( M=[0.5, 0.2, 0.1, 0.05, 0.03, 0.1], N=[0.1, 0.3...
pytorch-master
benchmarks/operator_benchmark/common/tests/random_sample_test.py
import operator_benchmark as op_bench import torch add_configs = op_bench.cross_product_configs( M=[8], N=[8], K=[8], device=["cuda", "cpu"], tags=["short"] ) class AddBenchmark(op_bench.TorchBenchmarkBase): def init(self, M, N, K, device): self.input_one = torch.rand(M, N, K, device...
pytorch-master
benchmarks/operator_benchmark/common/tests/pt_cpu_gpu_forward_backward_test.py
import operator_benchmark as op_bench from caffe2.python import core add_configs = op_bench.cross_product_configs( M=[8], N=[8], K=[8], tags=["short"], device=["cuda", "cpu"] ) class AddBenchmark(op_bench.Caffe2BenchmarkBase): def init(self, M, N, K, device): self.set_module_name("add...
pytorch-master
benchmarks/operator_benchmark/common/tests/c2_cpu_gpu_forward_backward_test.py
import operator_benchmark as op_bench import torch # Configs for pointwise unary ops unary_ops_configs = op_bench.config_list( attrs=[ [128, 128], ], attr_names=["M", "N"], tags=["short"] ) unary_ops_list = op_bench.op_list( attr_names=["op_name", "op_func"], attrs=[ ["abs", ...
pytorch-master
benchmarks/operator_benchmark/common/tests/add_ops_list_test.py
import operator_benchmark as op_bench import torch add_configs = op_bench.cross_product_configs( M=[8, 1], N=[8, 2], K=[8, 4], tags=["short"] ) # This benchmark uses the auto_set to automatically set requires_grad # for both inputs. The test name can also be used for filtering. class AddBenchmark(op_...
pytorch-master
benchmarks/operator_benchmark/common/tests/pt_backward_test.py
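The comment in the pt_backward_test row above says auto_set() sets requires_grad for both inputs automatically. A sketch of how that reads inside init(); the harness is assumed to toggle each auto_set() call site across generated test variants, so backward paths run with either, both, or neither input requiring grad:

import operator_benchmark as op_bench
import torch

class AddBenchmark(op_bench.TorchBenchmarkBase):
    def init(self, M, N, K):
        # Each auto_set() call site becomes an independently toggled flag (assumed semantics).
        self.inputs = {
            "input_one": torch.rand(M, N, K, requires_grad=self.auto_set()),
            "input_two": torch.rand(M, N, K, requires_grad=self.auto_set()),
        }

    def forward(self, input_one, input_two):
        return torch.add(input_one, input_two)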
import operator_benchmark as op_bench import torch """Microbenchmarks for quantized layernorm operator.""" layernorm_configs_short = op_bench.cross_product_configs( dims=( (1, 8, 16), (8, 8, 16), (32, 8, 16), (64, 128, 56, 56), ), dtype=(torch.qint8,), tags=["short"],...
pytorch-master
benchmarks/operator_benchmark/pt/qlayernorm_test.py
import operator_benchmark as op_bench import torch """Microbenchmarks for diag operator""" # Configs for PT diag operator diag_configs_short = op_bench.config_list( attr_names=['dim', 'M', 'N', 'diagonal', 'out'], attrs=[ [1, 64, 64, 0, True], [2, 128, 128, -10, False], [1, 256, 256,...
pytorch-master
benchmarks/operator_benchmark/pt/diag_test.py
import operator_benchmark as op_bench import torch import numpy from pt import configs """Embedding and EmbeddingBag Operator Benchmark""" class EmbeddingBagBenchmark(op_bench.TorchBenchmarkBase): def init(self, embeddingbags, dim, mode, input_size, offset, sparse, include_last_offset, device): self.embed...
pytorch-master
benchmarks/operator_benchmark/pt/embeddingbag_test.py
import operator_benchmark as op_bench import torch """Microbenchmarks for quantized groupnorm operator.""" groupnorm_configs_short = op_bench.cross_product_configs( dims=( (32, 8, 16), (32, 8, 56, 56), ), num_groups=(2, 4), dtype=(torch.qint8,), tags=["short"], ) class QGroupNo...
pytorch-master
benchmarks/operator_benchmark/pt/qgroupnorm_test.py
import operator_benchmark as op_bench import torch import torch.nn.quantized as nnq from typing import List """Microbenchmarks for quantized Cat operator""" # Configs for PT Cat operator qcat_configs_short = op_bench.config_list( attr_names=['M', 'N', 'K', 'L', 'dim'], attrs=[ [256, 512, 1, 2, 0], ...
pytorch-master
benchmarks/operator_benchmark/pt/qcat_test.py
import operator_benchmark as op_bench import torch import torch.nn.quantized as nnq import torch.ao.quantization as tq import torch.nn as nn """Microbenchmarks for general quantization operations.""" # mode is used to show the direction of the benchmark: # if 'Q', benchmark quantization, else dequantization quantiz...
pytorch-master
benchmarks/operator_benchmark/pt/quantization_test.py
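The mode flag described above picks a direction for the benchmark. In plain PyTorch terms the two directions look like this (a sketch, not the file's code; scale and zero_point are arbitrary):

import torch

x = torch.randn(32, 32)

# 'Q' direction: float tensor -> quantized tensor.
qx = torch.quantize_per_tensor(x, scale=0.1, zero_point=0, dtype=torch.quint8)

# Opposite direction: quantized tensor -> float tensor.
x_back = qx.dequantize()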
import operator_benchmark as op_bench import torch from torch import nn from torch.ao import sparsity """Microbenchmarks for sparsifier.""" sparse_configs_short = op_bench.config_list( attr_names=["M", "SL", "SBS", "ZPB"], attrs=[ [(32, 16), 0.3, (4, 1), 2], [(32, 16), 0.6, (1, 4), 4], ...
pytorch-master
benchmarks/operator_benchmark/pt/ao_sparsifier_test.py
import operator_benchmark as op_bench import torch from typing import List """Microbenchmarks for as_strided operator""" # Configs for PT as_strided operator as_strided_configs_short = op_bench.config_list( attr_names=["M", "N", "size", "stride", "storage_offset"], attrs=[ [8, 8, (2, 2), (1, 1), 0],...
pytorch-master
benchmarks/operator_benchmark/pt/as_strided_test.py
import operator_benchmark as op_bench import torch """Microbenchmarks for remainder operators.""" # Benchmark ops performance with broadcast remainder_ops_list = op_bench.op_list( attr_names=['op_name', 'op_func'], attrs=[ ['fmod', torch.fmod], ['remainder', torch.remainder], ], ) remai...
pytorch-master
benchmarks/operator_benchmark/pt/remainder_test.py
import operator_benchmark as op_bench """ Configs shared by multiple benchmarks """ def remove_cuda(config_list): cuda_config = {'device': 'cuda'} return [config for config in config_list if cuda_config not in config] # Configs for conv-1d ops conv_1d_configs_short = op_bench.config_list( attr_names=[ ...
pytorch-master
benchmarks/operator_benchmark/pt/configs.py
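remove_cuda in the configs.py row above filters with `{'device': 'cuda'} not in config`, which implies each generated config is a sequence of per-attribute dicts; that structure is an inference from the filter, not stated in the snippet. A self-contained illustration:

# Assumed shape of generated configs: a list of single-key dicts per config.
configs = [
    [{'M': 8}, {'N': 8}, {'device': 'cpu'}],
    [{'M': 8}, {'N': 8}, {'device': 'cuda'}],
]

cuda_config = {'device': 'cuda'}
cpu_only = [config for config in configs if cuda_config not in config]
print(cpu_only)  # only the cpu config survives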
import operator_benchmark as op_bench import torch """Microbenchmarks for quantized batchnorm operator.""" batchnorm_configs_short = op_bench.config_list( attr_names=["M", "N", "K"], attrs=[ [1, 256, 3136], ], cross_product_configs={ 'device': ['cpu'], 'dtype': (torch.qint8,)...
pytorch-master
benchmarks/operator_benchmark/pt/qbatchnorm_test.py
import operator_benchmark as op_bench import torch """Microbenchmarks for add_ operator. Supports both Caffe2/PyTorch.""" # Configs for PT add operator add_long_configs = op_bench.cross_product_configs( M=[8, 128], N=[32, 64], K=[256, 512], device=['cpu', 'cuda'], tags=["long"] ) add_short_confi...
pytorch-master
benchmarks/operator_benchmark/pt/add_test.py
import operator_benchmark as op_bench import torch """Microbenchmarks for point-wise unary operator.""" # Configs for pointwise unary ops unary_ops_configs_short = op_bench.config_list( attr_names=['M', 'N'], attrs=[ [512, 512], ], cross_product_configs={ 'device': ['cpu', 'cuda'], ...
pytorch-master
benchmarks/operator_benchmark/pt/unary_test.py
import operator_benchmark as op_bench import torch import torch.ao.quantization.observer as obs qobserver_short_configs_dict = { 'attr_names': ('C', 'M', 'N', 'dtype', 'device'), 'attrs': ( (3, 512, 512, torch.quint8, 'cpu'), (3, 512, 512, torch.quint8, 'cuda'), ), 'tags': ('short',), ...
pytorch-master
benchmarks/operator_benchmark/pt/qobserver_test.py
import operator_benchmark as op_bench import torch import torch.nn.functional as F """Microbenchmarks for instancenorm operator.""" instancenorm_configs_short = op_bench.cross_product_configs( dims=( (32, 8, 16), (32, 8, 56, 56), ), tags=["short"], ) class InstanceNormBenchmark(op_benc...
pytorch-master
benchmarks/operator_benchmark/pt/instancenorm_test.py
import operator_benchmark as op_bench import torch from torch.testing._internal.common_device_type import get_all_device_types """Microbenchmark for Fill_ operator.""" fill_short_configs = op_bench.config_list( attr_names=["N"], attrs=[ [1], [1024], [2048], ], cross_product_co...
pytorch-master
benchmarks/operator_benchmark/pt/fill_test.py
import operator_benchmark as op_bench import torch import torch.nn.quantized as nnq from pt import configs """ Microbenchmarks for qConv operators. """ class QConv1dBenchmark(op_bench.TorchBenchmarkBase): # def init(self, N, IC, OC, L, G, kernel, stride, pad): def init(self, IC, OC, kernel, stride, N, L, de...
pytorch-master
benchmarks/operator_benchmark/pt/qconv_test.py
import operator_benchmark as op_bench import torch """Microbenchmarks for quantized unary operators (point-wise and reduction).""" # Configs for pointwise and reduction unary ops qunary_ops_configs_short = op_bench.config_list( attr_names=['M', 'N'], attrs=[ [512, 512], ], cross_product_con...
pytorch-master
benchmarks/operator_benchmark/pt/qunary_test.py
import operator_benchmark as op_bench import torch """Microbenchmarks for interpolate operator.""" class InterpolateBenchmark(op_bench.TorchBenchmarkBase): def init(self, input_size, output_size, channels_last=False, mode='linear', dtype=torch.float): input_image = torch.randint(0, 256, size=input_size,...
pytorch-master
benchmarks/operator_benchmark/pt/interpolate_test.py
import operator_benchmark as op_bench import torch """ Microbenchmarks for the gelu operators. """ gelu_configs_long = op_bench.cross_product_configs( N=[1, 4], C=[3], H=[16, 256], W=[16, 256], device=['cpu'], tags=['long'] ) class GeluBenchmark(op_bench.TorchBenchmarkBase): def init(s...
pytorch-master
benchmarks/operator_benchmark/pt/gelu_test.py
import operator_benchmark as op_bench import torch """Microbenchmarks for bmm operator.""" class BmmBenchmark(op_bench.TorchBenchmarkBase): def init(self, B, M, N, K, device, op): self.inputs = { "batch1": torch.rand((B, M, K), device=device, requires_grad=self.a...
pytorch-master
benchmarks/operator_benchmark/pt/bmm_test.py
import operator_benchmark as op_bench import torch import torch.nn as nn from pt import configs """Microbenchmarks for Linear operator.""" class LinearBenchmark(op_bench.TorchBenchmarkBase): def init(self, N, IN, OUT, device): self.inputs = { "input_one": torch.rand(N, IN, device=device) ...
pytorch-master
benchmarks/operator_benchmark/pt/linear_test.py
import torch import torch.nn.quantized as nnq import operator_benchmark as op_bench r"""Microbenchmarks for the quantized activations.""" qactivation_long_configs = op_bench.cross_product_configs( dims=( # VGG-16 relu's with original shape: (-1, 3, 224, 224) ( 64, 224, 224), # ReLU-1 # noqa: E...
pytorch-master
benchmarks/operator_benchmark/pt/qactivation_test.py
import operator_benchmark as op_bench import torch import torch.nn as nn from pt import configs """ Microbenchmarks for Conv1d and ConvTranspose1d operators. """ class Conv1dBenchmark(op_bench.TorchBenchmarkBase): def init(self, IC, OC, kernel, stride, N, L, device): self.inputs = { "input":...
pytorch-master
benchmarks/operator_benchmark/pt/conv_test.py
import operator_benchmark as op_bench import torch """Microbenchmarks for Split operator""" # Configs for PT Split operator split_configs_short = op_bench.config_list( attr_names=["M", "N", "parts"], attrs=[ [8, 8, 2], [256, 512, 2], [512, 512, 2], ], cross_product_configs={ ...
pytorch-master
benchmarks/operator_benchmark/pt/split_test.py
pytorch-master
benchmarks/operator_benchmark/pt/__init__.py
import operator_benchmark as op_bench import torch import torch.nn.qat as nnqat import numpy from pt import configs from torch.ao.quantization import default_embedding_qat_qconfig """ Microbenchmarks for QAT Embedding + EmbeddingBag operators. """ class QATEmbeddingBagBenchmark(op_bench.TorchBenchmarkBase): def in...
pytorch-master
benchmarks/operator_benchmark/pt/qatembedding_ops_test.py
import operator_benchmark as op_bench import torch """Microbenchmarks for binary operators.""" # Benchmark ops performance with broadcast binary_ops_bcast_list = op_bench.op_list( attr_names=['op_name', 'op_func'], attrs=[ ['add', torch.add], ], ) # Configs with broadcast binary_configs_broadca...
pytorch-master
benchmarks/operator_benchmark/pt/binary_test.py
import operator_benchmark as op_bench import torch import random from typing import List """Microbenchmarks for Cat operator""" cross_product_configs = { 'device': ['cpu', 'cuda'], } # Configs for PT Cat operator cat_configs_short = op_bench.config_list( attr_names=['sizes', 'N', 'dim'], attrs=[ ...
pytorch-master
benchmarks/operator_benchmark/pt/cat_test.py
import operator_benchmark as op_bench import torch import random from typing import List """Microbenchmarks for Stack operator""" # Configs for PT stack operator stack_configs_static_runtime = op_bench.config_list( attr_names=['sizes', 'N'], attrs=[ [(20, 40), 5], [(1, 40), 5], ], cro...
pytorch-master
benchmarks/operator_benchmark/pt/stack_test.py
import operator_benchmark as op_bench import torch """ Microbenchmarks for batch matrix mult with einsum and torch.bmm. """ batch_mm_configs_short = op_bench.config_list( attr_names=["B", "M", "N", "K"], attrs=[ [4, 5, 3, 2], [32, 25, 20, 30], [128, 100, 120, 110], ], cross_pro...
pytorch-master
benchmarks/operator_benchmark/pt/matrix_mult_test.py
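The two implementations compared in matrix_mult_test compute the same batched product. For reference, a sketch using one (B, M, N, K) row from the configs above:

import torch

B, M, N, K = 4, 5, 3, 2
a = torch.randn(B, M, N)
b = torch.randn(B, N, K)

out_bmm = torch.bmm(a, b)                        # (B, M, K)
out_einsum = torch.einsum("bmn,bnk->bmk", a, b)  # same contraction via einsum
assert torch.allclose(out_bmm, out_einsum, atol=1e-6)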
import operator_benchmark as op_bench import torch import torch.nn.functional as F """Microbenchmarks for groupnorm operator.""" groupnorm_configs_short = op_bench.cross_product_configs( dims=( (32, 8, 16), (32, 8, 56, 56), ), num_groups=(2, 4), tags=["short"], ) class GroupNormBen...
pytorch-master
benchmarks/operator_benchmark/pt/groupnorm_test.py
import operator_benchmark as op_bench import torch import torch.nn.functional as F """Microbenchmarks for layernorm operator.""" layernorm_configs_short = op_bench.cross_product_configs( dims=( (1, 8, 16), (8, 8, 16), (32, 8, 16), (64, 128, 56, 56), ), tags=["short"], ) ...
pytorch-master
benchmarks/operator_benchmark/pt/layernorm_test.py
import operator_benchmark as op_bench import torch import torch.nn as nn """ Microbenchmarks for the hardsigmoid operator. """ # Configs for hardsigmoid ops hardsigmoid_configs_short = op_bench.config_list( attr_names=[ 'N', 'C', 'H', 'W' ], attrs=[ [1, 3, 256, 256], [4, 3, 256,...
pytorch-master
benchmarks/operator_benchmark/pt/hardsigmoid_test.py
import operator_benchmark as op_bench import torch # Configs for pointwise and reduction unary ops qmethods_configs_short = op_bench.config_list( attr_names=['M', 'N'], attrs=[ [32, 32], ], cross_product_configs={ 'dtype': [torch.quint8], 'contig': [False, True], }, tags...
pytorch-master
benchmarks/operator_benchmark/pt/qtensor_method_test.py
import operator_benchmark as op_bench import torch """Microbenchmarks for Chunk operator""" # Configs for PT Chunk operator chunk_short_configs = op_bench.config_list( attr_names=["M", "N", "chunks"], attrs=[ [8, 8, 2], [256, 512, 2], [512, 512, 2], ], cross_product_configs={...
pytorch-master
benchmarks/operator_benchmark/pt/chunk_test.py
import operator_benchmark as op_bench import torch """Microbenchmarks for sum reduction operator.""" # Configs for PT add operator sum_configs = op_bench.cross_product_configs( R=[64, 256], # Length of reduced dimension V=[32, 512], # Length of other dimension dim=[0, 1], contiguous=[True, False], ...
pytorch-master
benchmarks/operator_benchmark/pt/sum_test.py
import operator_benchmark as op_bench import torch import torch.nn as nn """ Microbenchmarks for the softmax operators. """ # Configs for softmax ops softmax_configs_short = op_bench.config_list( attr_names=[ 'N', 'C', 'H', 'W' ], attrs=[ [1, 3, 256, 256], [4, 3, 256, 256], ...
pytorch-master
benchmarks/operator_benchmark/pt/softmax_test.py
import operator_benchmark as op_bench import torch import torch.nn as nn """ Microbenchmarks for the hardswish operators. """ # Configs for hardswish ops hardswish_configs_short = op_bench.config_list( attr_names=[ 'N', 'C', 'H', 'W' ], attrs=[ [1, 3, 256, 256], [4, 3, 256, 256]...
pytorch-master
benchmarks/operator_benchmark/pt/hardswish_test.py
import operator_benchmark as op_bench import torch import torch.nn.functional as F """Microbenchmarks for batchnorm operator.""" # Benchmark cudnn if available if torch.backends.cudnn.is_available(): def cudnn_benchmark_configs(configs): result = [] for config in configs: is_cuda = any...
pytorch-master
benchmarks/operator_benchmark/pt/batchnorm_test.py
import operator_benchmark as op_bench import torch """Microbenchmarks for channel_shuffle operator.""" # Configs for PT channel_shuffle operator channel_shuffle_long_configs = op_bench.cross_product_configs( batch_size=[4, 8], channels_per_group=[32, 64], height=[32, 64], width=[32, 64], groups=...
pytorch-master
benchmarks/operator_benchmark/pt/channel_shuffle_test.py
import operator_benchmark as op_bench import torch import math """Microbenchmarks for torch.nan_to_num / nan_to_num_ operators""" # Configs for PT torch.nan_to_num / nan_to_num_ operators nan_to_num_ops_list = op_bench.op_list( attr_names=['op_name', 'op_func'], attrs=[ ['nan_to_num', torch.nan_to_n...
pytorch-master
benchmarks/operator_benchmark/pt/nan_to_num_test.py
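The two ops benchmarked above are the out-of-place and in-place spellings of the same replacement. A quick sketch of what each does:

import torch

x = torch.tensor([float('nan'), float('inf'), -float('inf'), 1.0])

y = torch.nan_to_num(x, nan=0.0)  # out-of-place: NaN/±inf replaced by finite values
x.nan_to_num_(nan=0.0)            # in-place variant, same replacement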
import operator_benchmark as op_bench import torch """Microbenchmarks for quantized instancenorm operator.""" instancenorm_configs_short = op_bench.cross_product_configs( dims=( (32, 8, 16), (32, 8, 56, 56), ), dtype=(torch.qint8,), tags=["short"], ) class QInstanceNormBenchmark(op...
pytorch-master
benchmarks/operator_benchmark/pt/qinstancenorm_test.py
import operator_benchmark as op_bench import torch '''Microbenchmarks for the quantized interpolate op. Note: We are not benchmarking `upsample` as it is being deprecated and calls `interpolate` anyway. ''' qinterpolate_long_configs = op_bench.config_list( attr_names=['M', 'N', 'K'], attrs=[ [51...
pytorch-master
benchmarks/operator_benchmark/pt/qinterpolate_test.py
import torch from torch._ops import ops import operator_benchmark as op_bench qarithmetic_binary_configs = op_bench.cross_product_configs( N=(2, 8, 64, 512), dtype=(torch.quint8, torch.qint8, torch.qint32), contig=(False, True), tags=('short',) ) qarithmetic_binary_ops = op_bench.op_list( attrs=(...
pytorch-master
benchmarks/operator_benchmark/pt/qarithmetic_test.py
import operator_benchmark as op_bench import torch from torch import nn """ Microbenchmarks for RNNs. """ qrnn_configs = op_bench.config_list( attrs=[ [1, 3, 1], [5, 7, 4], ], # names: input_size, hidden_size, num_layers attr_names=["I", "H", "NL"], cross_product_configs={ ...
pytorch-master
benchmarks/operator_benchmark/pt/qrnn_test.py
import operator_benchmark as op_bench import torch tensor_conversion_short_configs = op_bench.cross_product_configs( M=(8, 16, 32,), N=(16, 64, 128,), device=['cpu', 'cuda'], tags=['short'], ) tensor_conversion_long_configs = op_bench.cross_product_configs( M=(64, 128, 256, 512,), N=(256, 512,...
pytorch-master
benchmarks/operator_benchmark/pt/tensor_to_test.py
import operator_benchmark as op_bench import torch import torch.nn.quantized as nnq import torch.nn.quantized.dynamic as nnqd from pt import configs """ Microbenchmarks for Quantized Linear operators. """ class _QLinearBenchmarkBase(op_bench.TorchBenchmarkBase): def init(self, N, IN, OUT, linear_under_test): ...
pytorch-master
benchmarks/operator_benchmark/pt/qlinear_test.py
import operator_benchmark as op_bench import torch embeddingbag_conversion_short_configs = op_bench.cross_product_configs( num_embeddings=(80,), embedding_dim=(128, 256, 512), tags=('short',) ) embeddingbag_conversion_long_configs = op_bench.cross_product_configs( num_embeddings=(100, 120, 1000), ...
pytorch-master
benchmarks/operator_benchmark/pt/qembedding_pack_test.py
import operator_benchmark as op_bench import torch """Microbenchmarks for ClipRanges operator.""" torch.ops.load_library("//caffe2/torch/fb/sparsenn:sparsenn_operators") # Configs for C2 ClipRanges operator clip_ranges_long_configs = op_bench.cross_product_configs( LENGTH=range(1, 100), M=[1], N=[2], ...
pytorch-master
benchmarks/operator_benchmark/pt/clip_ranges_test.py
import operator_benchmark as op_bench import torch import numpy """Microbenchmarks for index_select operator.""" # An example input from this configuration is M=4, N=4, dim=0. index_select_configs_short = op_bench.config_list( attr_names=["M", "N", "K", "dim"], attrs=[ [8, 8, 1, 1], [256, 512...
pytorch-master
benchmarks/operator_benchmark/pt/index_select_test.py
import operator_benchmark as op_bench import torch import numpy as np from typing import Optional from torch.testing._internal.common_quantization import ( lengths_to_offsets ) torch.ops.load_library("//caffe2/torch/fb/sparsenn:sparsenn_operators") embedding_bag_rowwise_offsets_short_configs = op_bench.cross_p...
pytorch-master
benchmarks/operator_benchmark/pt/qembedding_bag_lookups_test.py
import operator_benchmark as op_bench import torch """Microbenchmarks for MatMul operator""" # Configs for PT Matmul operator mm_short_configs = op_bench.config_list( attr_names=["M", "N", "K", "trans_a", "trans_b"], attrs=[ [1, 1, 1, True, False], [128, 128, 128, True, False], [256, 2...
pytorch-master
benchmarks/operator_benchmark/pt/matmul_test.py
import torch import operator_benchmark as op_bench qcomparators_configs = op_bench.cross_product_configs( N=(8, 64), dtype=(torch.quint8, torch.qint8, torch.qint32), contig=(False, True), other_scalar=(False, True), out_variant=(False, True), tags=('short',) ) qcomparators_ops = op_bench.op_l...
pytorch-master
benchmarks/operator_benchmark/pt/qcomparators_test.py
import torch import operator_benchmark as op_bench # 2D pooling will have input matrix of rank 3 or 4 qpool2d_long_configs = op_bench.config_list( attrs=( # C H W k s p ( 1, 3, 3, (3, 3), (1, 1), (0, 0)), # dummy # noqa: E201,E241 ( 3, 64, 64, (3, 3), (...
pytorch-master
benchmarks/operator_benchmark/pt/qpool_test.py
import operator_benchmark as op_bench import torch import numpy """Microbenchmarks for gather operator.""" # An example input from this configuration is M=4, N=4, dim=0. gather_configs_short = op_bench.config_list( attr_names=["M", "N", "dim"], attrs=[ [256, 512, 0], [512, 512, 1], ], ...
pytorch-master
benchmarks/operator_benchmark/pt/gather_test.py
import operator_benchmark as op_bench import torch """Microbenchmarks for linear_unpack_fp16_ operator. Supports both Caffe2/PyTorch.""" # Configs for PT linear_unpack_fp16 operator linear_unpack_fp16_long_configs = op_bench.cross_product_configs( M=[8, 128], N=[32, 64], K=[256, 512], device=['cpu'], ...
pytorch-master
benchmarks/operator_benchmark/pt/linear_unpack_fp16_test.py
import operator_benchmark as op_bench import torch import torch.nn.quantized as nnq import numpy from pt import configs """ Microbenchmarks for qEmbeddingBag operators. """ class QEmbeddingBagBenchmark(op_bench.TorchBenchmarkBase): def init(self, embeddingbags, dim, mode, input_size, offset, sparse, include_last...
pytorch-master
benchmarks/operator_benchmark/pt/qembeddingbag_test.py
import operator_benchmark as op_bench import torch """Microbenchmarks for linear_prepack_fp16_ operator. Supports both Caffe2/PyTorch.""" # Configs for PT linear_prepack_fp16 operator linear_prepack_fp16_long_configs = op_bench.cross_product_configs( M=[8, 128], N=[32, 64], K=[256, 512], device=['cpu'...
pytorch-master
benchmarks/operator_benchmark/pt/linear_prepack_fp16_test.py
import operator_benchmark as op_bench import torch import torch.nn as nn """ Microbenchmarks for MaxPool1d and AvgPool1d operators. """ # Configs for pool-1d ops pool_1d_configs_short = op_bench.config_list( attr_names=[ 'kernel', 'stride', 'N', 'C', 'L' ], attrs=[ [3, 1, 8, 256, 256], ...
pytorch-master
benchmarks/operator_benchmark/pt/pool_test.py
import operator_benchmark as op_bench import benchmark_caffe2 as op_bench_c2 import random from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401 from caffe2.python import core """Microbenchmarks for Concat operator. Supports both Caffe2/PyTorch.""" cross_product_configs = { 'device': ['cpu', 'cuda'], ...
pytorch-master
benchmarks/operator_benchmark/c2/concat_test.py