python_code
stringlengths
0
258k
import torch import torch.nn as nn from torch.autograd import Variable import torch.nn.functional as F import math def is_iterable(maybe_iterable): return isinstance(maybe_iterable, list) or isinstance(maybe_iterable, tuple) def flatten_list(tens_list): """ flatten_list """ if not is_iterable(...
import torch from torch.nn._functions.rnn import LSTMCell, RNNReLUCell, RNNTanhCell, GRUCell from .RNNBackend import bidirectionalRNN, stackedRNN, RNNCell from .cells import mLSTMRNNCell, mLSTMCell def toRNNBackend(inputRNN, num_layers, bidirectional=False, dropout = 0): """ :class:`toRNNBackend` """ ...
# Re-export the RNN model entry points from the sibling ``models`` module.
from .models import LSTM, GRU, ReLU, Tanh, mLSTM

# Only the ``models`` submodule name is advertised for ``import *``;
# the classes above are still reachable as package attributes.
__all__ = ['models']
import torch import torch.nn as nn import torch.nn.functional as F from .RNNBackend import RNNCell from torch.nn._functions.thnn import rnnFusedPointwise as fusedBackend import math class mLSTMRNNCell(RNNCell): """ mLSTMRNNCell """ def __init__(self, input_size, hidden_size, bias = False, output_...
import torch import torch.distributed as dist from torch.nn import Parameter from torch.nn import Module from apex.parallel import DistributedDataParallel as DDP import argparse import os parser = argparse.ArgumentParser(description='allreduce hook example') parser.add_argument("--local_rank", default=0, type=int) ar...
import torch import argparse import os from apex import amp # FOR DISTRIBUTED: (can also use torch.nn.parallel.DistributedDataParallel instead) from apex.parallel import DistributedDataParallel parser = argparse.ArgumentParser() # FOR DISTRIBUTED: Parse for the local_rank argument, which will be supplied # automatica...
import torch model_params_rank0 = torch.load("rank0model.pth", map_location = lambda storage, loc: storage.cuda(0)) model_params_rank1 = torch.load("rank1model.pth", map_location = lambda storage, loc: storage.cuda(0)) master_params_rank0 = torch.load("rank0m...
import torch import numpy as np import apex import syncbn import os import argparse import torch.optim as optim def compare(desc, inp1, inp2, error): a = inp1.clone().detach().cpu().numpy() b = inp2.clone().detach().cpu().numpy() close = np.allclose(a,b, error, error) if not close: print(desc, ...
import torch import numpy as np import apex if True: print("using setup tools") import syncbn else: print("using jit") from torch.utils.cpp_extension import load syncbn = load(name='syncbn', sources=['../../csrc/syncbn.cpp', '../../csrc/welford.cu']) def compare(desc, inp1, inp2, error): a = in...
import torch import numpy as np import apex import syncbn import os import argparse import torch.optim as optim def compare(desc, inp1, inp2, error): a = inp1.clone().detach().cpu().numpy() b = inp2.clone().detach().cpu().numpy() close = np.allclose(a,b, error, error) if not close: print(desc, ...
import unittest import sys test_dirs = ["run_amp", "run_fp16util", "run_mixed_adam", "run_fused_layer_norm"] runner = unittest.TextTestRunner(verbosity=2) errcode = 0 for test_dir in test_dirs: suite = unittest.TestLoader().discover(test_dir) print("\nExecuting tests from " + test_dir) result = runner...
import unittest import os import random import torch import apex class TestFusedAdam(unittest.TestCase): def setUp(self, max_abs_diff=1e-3, max_rel_diff=1, iters=7): self.max_abs_diff = max_abs_diff self.max_rel_diff = max_rel_diff self.iters = iters torch.cuda.manual_seed(9876) ...
import unittest import torch import apex class TestFP16Optimizer(unittest.TestCase): def setUp(self, max_abs_diff=1e-3, max_rel_diff=1, iters=7): self.max_abs_diff = max_abs_diff self.max_rel_diff = max_rel_diff self.iters = iters torch.cuda.manual_seed(13337) N, D_in, D_ou...
import unittest import os import random import torch import apex class TestFusedLayerNorm(unittest.TestCase): def setUp(self): self.module = apex.normalization.FusedLayerNorm(normalized_shape=[32, 64], elementwise_affine=False) self.input_ = torch.randn(16, 32, 64) torch.cuda.manu...
import unittest import functools as ft import itertools as it from apex import amp from apex.amp import _amp_state import torch from torch import nn import torch.nn.functional as F from torch.nn import Parameter from utils import common_init, HALF, FLOAT,\ ALWAYS_HALF, ALWAYS_FLOAT, MATCH_INPUT class MyModel(to...
import unittest import functools as ft import itertools as it from apex import amp import torch from torch import nn import torch.nn.functional as F from utils import common_init, HALF, FLOAT,\ ALWAYS_HALF, ALWAYS_FLOAT, MATCH_INPUT try: import amp_C from amp_C import multi_tensor_l2norm from apex.multi_t...
import unittest import functools as ft import itertools as it from apex import amp from apex.amp import _amp_state import torch from torch import nn import torch.nn.functional as F from torch.nn import Parameter from utils import common_init, HALF, FLOAT,\ ALWAYS_HALF, ALWAYS_FLOAT, MATCH_INPUT class MyModel(to...
import unittest import itertools as it from apex import amp import torch from torch import nn import torch.nn.functional as F from utils import common_init, HALF, FLOAT, DTYPES class TestPromotion(unittest.TestCase): def setUp(self): self.handle = amp.init(enabled=True) common_init(self) de...
import unittest import functools as ft import itertools as it from apex import amp import torch from torch import nn import torch.nn.functional as F from utils import common_init, HALF, FLOAT,\ ALWAYS_HALF, ALWAYS_FLOAT, MATCH_INPUT try: import amp_C from amp_C import multi_tensor_axpby from apex.multi_te...
import torch HALF = 'torch.cuda.HalfTensor' FLOAT = 'torch.cuda.FloatTensor' DTYPES = [torch.half, torch.float] ALWAYS_HALF = {torch.float: HALF, torch.half: HALF} ALWAYS_FLOAT = {torch.float: FLOAT, torch.half: FLOAT} MATCH_INPUT = {torch.float: FLOAT, torch.half: HALF}...
import unittest from apex import amp import random import torch from torch import nn from utils import common_init, HALF class TestRnnCells(unittest.TestCase): def setUp(self): self.handle = amp.init(enabled=True) common_init(self) def tearDown(self): self.handle._deactivate() d...
import unittest import functools as ft import itertools as it from apex import amp import torch from torch import nn import torch.nn.functional as F from utils import common_init, HALF, FLOAT,\ ALWAYS_HALF, ALWAYS_FLOAT, MATCH_INPUT try: import amp_C from amp_C import multi_tensor_scale from apex.multi_t...
import unittest import functools as ft import itertools as it from apex import amp from apex.amp import _amp_state import torch from torch import nn import torch.nn.functional as F from utils import common_init, HALF, FLOAT,\ ALWAYS_HALF, ALWAYS_FLOAT, MATCH_INPUT def get_reference_grad(i, w, ops): # Creati...
import unittest import functools as ft import itertools as it from apex import amp import torch from torch import nn import torch.nn.functional as F from utils import common_init, HALF, FLOAT,\ ALWAYS_HALF, ALWAYS_FLOAT, MATCH_INPUT def run_layer_test(test_case, fns, expected, input_shape, test_backward=True): ...
import unittest import torch import torch.nn as nn from apex.fp16_utils import FP16Model class DummyBlock(nn.Module): def __init__(self): super(DummyBlock, self).__init__() self.conv = nn.Conv2d(10, 10, 2) self.bn = nn.BatchNorm2d(10, affine=True) def forward(self, x): retu...
import argparse import os import shutil import time import torch import torch.nn as nn import torch.nn.parallel import torch.backends.cudnn as cudnn import torch.distributed as dist import torch.optim import torch.utils.data import torch.utils.data.distributed import torchvision.transforms as transforms import torchvi...
import argparse import torch parser = argparse.ArgumentParser(description='Compare') parser.add_argument('--opt-level', type=str) parser.add_argument('--keep-batchnorm-fp32', type=str, default=None) parser.add_argument('--loss-scale', type=str, default=None) parser.add_argument('--fused-adam', action='store_true') par...
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # PyTorch documentation build configuration file, created by # sphinx-quickstart on Fri Dec 23 13:31:47 2016. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # au...
import torch import argparse import os from apex import amp # FOR DISTRIBUTED: (can also use torch.nn.parallel.DistributedDataParallel instead) from apex.parallel import DistributedDataParallel parser = argparse.ArgumentParser() # FOR DISTRIBUTED: Parse for the local_rank argument, which will be supplied # automatica...
import argparse import os import shutil import time import torch import torch.nn as nn import torch.nn.parallel import torch.backends.cudnn as cudnn import torch.distributed as dist import torch.optim import torch.utils.data import torch.utils.data.distributed import torchvision.transforms as transforms import torchvi...
from pprint import pprint with open('cachegrind.out', 'rb') as f: events = None while True: l = f.readline() if l is None: break if l.startswith(b"events: "): events = l.decode('ascii').split()[1:] break assert events is not None f.seek(-200, ...
"""test_bench.py Runs hub models in benchmark mode using pytest-benchmark. Run setup separately first. Usage: python install.py pytest test_bench.py See pytest-benchmark help (pytest test_bench.py -h) for additional options e.g. --benchmark-autosave --benchmark-compare -k <filter expression> ... ""...
import os import traceback import argparse import importlib from pathlib import Path from typing import Dict CURRENT_DIR = os.path.dirname(os.path.realpath(__file__)) def list_benchmarks() -> Dict[str, str]: benchmarks = {} import userbenchmark bdir = Path(userbenchmark.__file__).parent.resolve() fb_b...
""" A lightweight runner that just sets up a model and runs one of its functions in a particular configuration. Intended for debugging/exploration/profiling use cases, where the test/measurement harness is overhead. DANGER: make sure to `python install.py` first or otherwise make sure the benchmark you are going to r...
import os import pytest import torch from torchbenchmark.util.machine_config import get_machine_config, check_machine_configured def pytest_addoption(parser): parser.addoption("--fuser", help="Use one of the available fusers: te, old, nvfuser", default="te", choices=["te", "old", "nvfuser"]) parser.addoption(...
#!/usr/bin/env python import argparse import gc import logging import os import re import warnings from torchbenchmark import list_models import torch NO_JIT = {"demucs", "dlrm", "maml", "yolov3", "moco", "pytorch_CycleGAN_and_pix2pix", "tacotron2"} NO_GET_MODULE = {"Background_Matting"} def get_dump_filename(name, ...
""" Compute the benchmark score given a frozen score configuration and current benchmark data. """ import argparse import json import math import yaml import sys import os from torchbenchmark.score.compute_score import TorchBenchScore if __name__ == "__main__": parser = argparse.ArgumentParser(description=__doc_...
import time import torch import argparse import json from dataclasses import asdict from torchbenchmark.e2e import E2EBenchmarkResult, load_e2e_model_by_name from typing import Dict SUPPORT_DEVICE_LIST = ["cpu", "cuda"] def run(func) -> Dict[str, float]: if torch.cuda.is_available(): torch.cuda.synchroniz...
""" Run a config of benchmarking with a list of models. If unspecified, run a sweep of all models. """ import argparse import json import os import sys import numpy import sys import torch import time import pathlib import dataclasses import itertools import torch from typing import List, Optional, Dict, Any, Tuple fro...
""" A Benchmark Summary Metadata tool to extract and generate metadata from models at runtime. """ import argparse from copy import deepcopy import os import yaml from typing import Any, Dict, List, Tuple import torch from torchbenchmark import list_models, load_model_by_name, _list_model_paths, ModelTask, ModelDetail...
"""test.py Setup and Run hub models. Make sure to enable an https proxy if necessary, or the setup steps may hang. """ # This file shows how to use the benchmark suite from user end. import gc import functools import os import traceback import unittest from unittest.mock import patch import yaml import torch from tor...
import argparse import json # import pandas as pd import os # import sys # import re import yaml import itertools # from bokeh.layouts import column, row, layout, gridplot # from bokeh.plotting import figure, output_file, show # from bokeh.sampledata.autompg import autompg # from bokeh.transform import jitter from bok...
import argparse import subprocess import os import sys import yaml import tarfile from utils import TORCH_DEPS, proxy_suggestion, get_pkg_versions, _test_https from pathlib import Path REPO_ROOT = Path(__file__).parent def s3_checkout(): S3_URL_BASE = "https://ossci-datasets.s3.amazonaws.com/torchbench" data_d...
""" The regression detector of TorchBench Userbenchmark. """ import json import argparse import importlib from dataclasses import dataclass, asdict import os import yaml from pathlib import Path import time from datetime import datetime from typing import Any, List, Dict, Optional from userbenchmark.utils import PLATFO...
import argparse import json from collections import namedtuple Result = namedtuple("Result", ["name", "base_time", "diff_time"]) def get_times(pytest_data): return {b["name"]: b["stats"]["mean"] for b in pytest_data["benchmarks"]} parser = argparse.ArgumentParser("compare two pytest jsons") parser.add_argument('...
#!/usr/bin/env python from collections import Counter, defaultdict from functools import partial from torch.cuda import synchronize from typing import Any, Dict, Callable, Optional import argparse import gc import logging import os import pandas as pd import re import time import warnings os.environ["FX_PATCH_GETITEM"...
"""bisection.py Runs bisection to determine PRs that cause performance change. It assumes that the pytorch, torchbench, torchtext, torchvision, and torchaudio repositories provided are all clean with the latest code. By default, the torchaudio, torchvision and torchtext packages will be fixed to the latest commit on th...
from enum import Enum # Enum class for each Domain for the model and the respective tasks # that is available in the domain. class COMPUTER_VISION(Enum): SEGMENTATION = "segmentation" CLASSIFICATION = "classification" DETECTION = "detection" GENERATION = "generation" PATTERN_RECOGNITION = "pattern ...
import contextlib import dataclasses import gc import importlib import io import os import pathlib import subprocess import sys import tempfile import threading from pathlib import Path from typing import Any, Callable, Dict, List, NoReturn, Optional, Tuple from urllib import request import torch from components._imp...
import os import pathlib import importlib from dataclasses import dataclass from typing import List, Dict, Any E2E_MODEL_DIR = 'e2e_models' def _list_model_paths() -> List[str]: p = pathlib.Path(__file__).parent.joinpath(E2E_MODEL_DIR) return sorted(str(child.absolute()) for child in p.iterdir() if child.is_d...
from torchbenchmark.util.framework.gnn.model_factory import GNNModel from torchbenchmark.tasks import GNN class Model(GNNModel): task = GNN.CLASSIFICATION DEFAULT_TRAIN_BSIZE = 64 DEFAULT_EVAL_BSIZE = 64 def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]): super().__ini...
import subprocess
import sys


def pip_install_requirements():
    """Quietly install this benchmark's requirements.txt with pip.

    The PyG wheel index is passed via ``-f`` so binary wheels built
    against torch 2.0.0 (CPU) can be resolved.
    """
    install_cmd = [
        sys.executable, '-m', 'pip', 'install', '-q',
        '-r', 'requirements.txt',
        '-f', 'https://data.pyg.org/whl/torch-2.0.0+cpu.html',
    ]
    subprocess.check_call(install_cmd)


if __name__ == '__main__':
    pip_install_requirements()
from torchbenchmark.tasks import COMPUTER_VISION from torchbenchmark.util.framework.diffusers.model_factory import DiffuserModel class Model(DiffuserModel): task = COMPUTER_VISION.GENERATION DEFAULT_TRAIN_BSIZE = 4 DEFAULT_EVAL_BSIZE = 1 # Default eval precision on CUDA device is fp16 DEFAULT_EVAL_...
from torchbenchmark.util.framework.diffusers import install_diffusers from diffusers import StableDiffusionInstructPix2PixPipeline import torch MODEL_NAME = "timbrooks/instruct-pix2pix" def load_model_checkpoint(): StableDiffusionInstructPix2PixPipeline.from_pretrained(MODEL_NAME, torch_dtype=torch.float16, safet...
import dataclasses from typing import List def cfg_to_str(cfg: dataclasses.dataclass) -> List[str]: def rewrite_option(opt: str) -> str: new_opt = opt.replace("_", "-") return f"--{new_opt}" out = [] for fld in dataclasses.fields(cfg): new_option = rewrite_option(fld.name) v...
import sys from torch.optim.lr_scheduler import _LRScheduler class LRPolicyScheduler(_LRScheduler): def __init__(self, optimizer, num_warmup_steps, decay_start_step, num_decay_steps): self.num_warmup_steps = num_warmup_steps self.decay_start_step = decay_start_step self.decay_end_step = dec...
""" Simplifed dlrm model from FAMBench It doesn't support multiGPU or fbgemm_gpu. """ import torch import sys import os import numpy as np import torch.nn as nn from torchbenchmark import REPO_PATH from typing import Tuple, List from torchbenchmark.util.model import BenchmarkModel from torchbenchmark.tasks import RECOM...
import torch.nn as nn import torch import sys import numpy as np import itertools from torch._ops import ops from torch.nn.parameter import Parameter from torch.nn.parallel.replicate import replicate from torch.nn.parallel.parallel_apply import parallel_apply from torch.nn.parallel.scatter_gather import gather, scatter...
import torch # The following function is a wrapper to avoid checking this multiple times in th # loop below. def unpack_batch(b, device): # Experiment with unweighted samples return b[0], b[1], b[2], b[3], torch.ones(b[3].size()).to(device), None def dlrm_wrap(dlrm, X, lS_o, lS_i, use_gpu, device, ndevices=1)...
# Currently, this file is not used, because torchbench doesn't support fbgemm embeddding yet; # Note that FAMBench does support it. import torch.nn as nn import torch import os import sys import numpy as np from torchbenchmark import REPO_PATH # This file assumes fbgemm_gpu is installed import fbgemm_gpu from fbgemm_gp...
# Original source: # https://github.com/facebookresearch/FAMBench/blob/a0f12ca4fe8973f4cc65d18b51ce3aa94ceec0ac/benchmarks/dlrm/ootb/dlrm_s_pytorch.py import sys import torch import argparse def dash_separated_ints(value): vals = value.split("-") for val in vals: try: int(val) excep...
import os import sys import torch import subprocess from torchbenchmark import REPO_PATH def update_fambench_submodule(): "Update FAMBench submodule of the benchmark repo" update_command = ["git", "submodule", "update", "--init", "--recursive", os.path.join("submodules","FAMBench")] ...
import torch import sys import numpy as np # data generation import dlrm_data_pytorch as dp def prep_data(args): ln_bot = np.fromstring(args.arch_mlp_bot, dtype=int, sep="-") if args.data_generation == "dataset": train_data, train_ld, test_data, test_ld = dp.make_criteo_data_and_loaders(args) t...
""" HuggingFace Stable Diffusion model. It requires users to specify "HUGGINGFACE_AUTH_TOKEN" in environment variable to authorize login and agree HuggingFace terms and conditions. """ from torchbenchmark.tasks import COMPUTER_VISION from torchbenchmark.util.model import BenchmarkModel import torch from diffusers impo...
from torchbenchmark.util.framework.diffusers import install_diffusers from diffusers import StableDiffusionPipeline import torch MODEL_NAME = "stabilityai/stable-diffusion-2" def load_model_checkpoint(): StableDiffusionPipeline.from_pretrained(MODEL_NAME, torch_dtype=torch.float16, safety_checker=None) if __name...
from torchbenchmark.util.framework.gnn.model_factory import GNNModel from torchbenchmark.tasks import GNN class Model(GNNModel): task = GNN.CLASSIFICATION DEFAULT_TRAIN_BSIZE = 64 DEFAULT_EVAL_BSIZE = 64 def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]): super().__ini...
import subprocess
import sys


def pip_install_requirements():
    """Install the benchmark dependencies listed in requirements.txt.

    Runs pip quietly under the current interpreter, with the PyG
    torch-2.0.0+cpu wheel index as an extra find-links source.
    """
    subprocess.check_call(
        [
            sys.executable,
            '-m',
            'pip',
            'install',
            '-q',
            '-r',
            'requirements.txt',
            '-f',
            'https://data.pyg.org/whl/torch-2.0.0+cpu.html',
        ]
    )


if __name__ == '__main__':
    pip_install_requirements()
from torchbenchmark.util.framework.gnn.model_factory import GNNModel from torchbenchmark.tasks import GNN class Model(GNNModel): task = GNN.CLASSIFICATION DEFAULT_TRAIN_BSIZE = 64 DEFAULT_EVAL_BSIZE = 64 def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]): super().__ini...
import subprocess
import sys


def pip_install_requirements():
    """Run ``pip install -q -r requirements.txt`` for this benchmark.

    Supplies the PyG wheel index (torch-2.0.0+cpu) via ``-f`` so the
    matching prebuilt wheels are found.
    """
    pip_prefix = [sys.executable, '-m', 'pip', 'install', '-q']
    pip_args = ['-r', 'requirements.txt',
                '-f', 'https://data.pyg.org/whl/torch-2.0.0+cpu.html']
    subprocess.check_call(pip_prefix + pip_args)


if __name__ == '__main__':
    pip_install_requirements()
import torch from typing import Optional, List from contextlib import contextmanager, ExitStack from typing import ContextManager class PostInitProcessor(type): def __call__(cls, *args, **kwargs): obj = type.__call__(cls, *args, **kwargs) obj.__post__init__() return obj @contextmanager def...
import argparse import enum from typing import List, Optional, Tuple from torchbenchmark.util.backends import list_backends, BACKENDS from torchbenchmark.util.backends.flops import enable_fvcore_flops from torchbenchmark.util.env_check import is_torchvision_model, is_staged_train_test TEST_STAGE = enum.Enum('TEST_STA...
import argparse import re import torch from enum import Enum class OpType(Enum): POINTWISE = 1 NORMS = 2 REDUCTIONS = 3 VIEWS_EXPANDS = 4 REMOVE = 5 IGNORE = 6 op_types = { "aten::rsqrt": OpType.POINTWISE, "aten::abs": OpType.POINTWISE, "aten::eq": OpType.POINTWISE, "aten::gel...
"""Utilities for tuning the machine for better benchmark stability. Written for Amazon linux and Intel CPU, Nvidia GPU althogh many utilities will overlap. """ import argparse import cpuinfo import distro import enum import os import platform import psutil import subprocess import re import sys import typing from path...
"""gitutils.py Utils for getting git-related information. """ import os import time from pathlib import Path import subprocess from datetime import datetime from typing import Optional, List def clean_git_repo(repo: str) -> bool: try: command = f"git clean -xdf" subprocess.check_call(command, cwd...
import copy import importlib import os import torch from contextlib import contextmanager, ExitStack import warnings import inspect import yaml from pathlib import Path from typing import ContextManager, Optional, List, Tuple, Generator from torch.utils._pytree import tree_map from torchbenchmark import REPO_PATH from ...
""" Return a list of recent PyTorch wheels published on download.pytorch.org. Users can specify package name, python version, platform, and the number of days to return. If one of the packages specified is missing on one day, the script will skip outputing the results on that day. """ import os import re import reques...
""" Utils for model metadata """ from typing import Any, List, Dict def match_item(item_name: str, item_val: str, skip_item: Dict[str, Any]) -> bool: if item_name not in skip_item: return True return skip_item[item_name] == item_val def skip_by_metadata(test: str, device:str, jit: bool, extra_args: L...
def prefetch_loader(loader, device):
    """Eagerly move every batch yielded by *loader* onto *device*.

    Iterates the loader once, calls ``.to(device)`` on each element of
    each batch, and returns the prefetched batches as a list of tuples.
    NOTE(review): this materializes the entire dataset on the target
    device at once — fine for small benchmark loaders, costly otherwise.

    :param loader: iterable of batches, each batch an iterable of items
        exposing a ``.to(device)`` method (e.g. tensors).
    :param device: target passed through to each item's ``.to``.
    :return: list of tuples, one tuple per batch, preserving order.
    """
    # Nested comprehension replaces the original append loop; ordering
    # and tuple structure are identical.
    return [tuple(item.to(device) for item in batch) for batch in loader]
import argparse import os import subprocess from datetime import date, timedelta from pathlib import Path from torch_nightly import get_n_prior_nightly_wheels def run_step(cmd, cwd=None, conda_env=None, verbose=True): if verbose: print(f" # running step: {cmd}") if conda_env: cmd = f'conda run...
""" PyTorch benchmark env check utils. This file may be loaded without torch packages installed, e.g., in OnDemand CI. """ import importlib import copy import warnings from typing import List, Dict, Tuple, Optional MAIN_RANDOM_SEED = 1337 # rounds for stableness tests STABLENESS_CHECK_ROUNDS: int = 3 # rounds for corr...
import re import torch from torch.ao.quantization import QuantWrapper, get_default_qconfig_mapping, get_default_qconfig_propagation_list from torch.ao.quantization.quantize_fx import _fuse_fx, prepare_fx, convert_fx from torchbenchmark.util.env_check import is_hf_model def _append_attr(fx_module, module, fx_white_list...
import json import os import pandas as pd import typing class BenchmarkData: def __init__(self): self._benchmark_data = {} self._machine_info = {} self._commit_info = {} self._names_all = set() self._names_common = set() self._tags = [] self._json_raw = [] ...
# coding: utf8 from collections import Counter, OrderedDict from itertools import chain import torch from tqdm import tqdm from .data import Dataset from .pipeline import Pipeline from torchtext.data.utils import get_tokenizer, dtype_to_attr, is_tokenizer_serializable from .vocab import Vocab class RawField(object): ...
import torch from collections import defaultdict class Vocab(object): """Defines a vocabulary object that will be used to numericalize a field. Attributes: freqs: A collections.Counter object holding the frequencies of tokens in the data used to build the Vocab. stoi: A collections....
import torch class Batch(object): """Defines a batch of examples along with its Fields. Attributes: batch_size: Number of examples in the batch. dataset: A reference to the dataset object the examples come from (which itself contains the dataset's Field objects). train: Dep...
from .data import Dataset from .example import Example class SequenceTaggingDataset(Dataset): """Defines a dataset for sequence tagging. Examples in this dataset contain paired lists -- paired list of words and tags. For example, in the case of part-of-speech tagging, an example is of the form [I, ...
import math import random import logging import torch from torchtext.data.utils import RandomShuffler from .batch import Batch from .data import Dataset logger = logging.getLogger(__name__) class Iterator(object): """Defines an iterator that loads batches of data from a Dataset. Attributes: dataset:...
import json from functools import reduce class Example(object): """Defines a single training or test example. Stores each column of the example as an attribute. """ @classmethod def fromJSON(cls, data, fields): ex = cls() obj = json.loads(data) for key, vals in fields.item...
class Pipeline(object): """Defines a pipeline for transforming sequence data. The input is assumed to be utf-8 encoded `str`. Attributes: convert_token: The function to apply to input sequence data. pipes: The Pipelines that will be applied to input sequence data in order. ""...
import os try: import defusedxml.ElementTree as ET except ImportError: import xml.etree.ElementTree as ET import glob import io import codecs from .data import Dataset from .example import Example class TranslationDataset(Dataset): """Defines a dataset for machine translation.""" @staticmethod d...
import io import os import zipfile import tarfile import gzip import sys import csv import shutil from functools import partial import torch.utils.data from torchtext.data.utils import RandomShuffler from .example import Example from torchtext.utils import download_from_url def unicode_csv_reader(unicode_csv_data, *...
import subprocess import os import sys from pathlib import Path CURRENT_DIR = Path(os.path.dirname(os.path.realpath(__file__))) def pip_install_requirements(): requirements_file = os.path.join(CURRENT_DIR, "requirements.txt") subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', requirem...
import torch from torchbenchmark.util.model import BenchmarkModel from diffusers import StableDiffusionInstructPix2PixPipeline, EulerAncestralDiscreteScheduler from typing import Optional, List class DiffuserModel(BenchmarkModel): DIFFUSER_MODEL = True def __init__(self, name: str, test: str, device: str, ji...
import torch from typing import Tuple def enable_cudagraph(model: 'torchbenchmark.util.model.BenchmarkModel', example_inputs: Tuple[torch.tensor]): optimizer = model.optimizer loss_fn = model.loss_fn # warmup s = torch.cuda.Stream() s.wait_stream(torch.cuda.current_stream()) with torch.cuda.str...
import os import torch import typing import torch.optim as optim import torchvision.models as models from contextlib import nullcontext from torchbenchmark.util.model import BenchmarkModel from typing import Tuple, Generator, Optional class TorchVisionModel(BenchmarkModel): # To recognize this is a torchvision mod...
import argparse


def parse_tb_args(args):
    """Split torchbench-specific flags out of *args*.

    Recognizes ``--graph_type`` ("dense" or "sparse", default "dense")
    and returns ``(parsed_args, unknown_args)``, where ``unknown_args``
    contains everything argparse did not recognize.
    """
    tb_parser = argparse.ArgumentParser()
    tb_parser.add_argument(
        "--graph_type",
        choices=["dense", "sparse"],
        default="dense",
        help="Determine dense graph or sparse graph",
    )
    parsed_args, unknown_args = tb_parser.parse_known_args(args)
    return parsed_args, unknown_args