python_code stringlengths 0 229k |
|---|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Ensemble posteriors. Used in conjunction with ensemble models.
"""
from __future__ import annotations
from typ... |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Abstract base module for all botorch posteriors.
"""
from __future__ import annotations
import warnings
from ... |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional, Tuple, Union
import torch
from botorch.exceptions.errors import BotorchTensorDimensionError
from botorch.poste... |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Posterior module to be used with PyTorch distributions.
"""
from __future__ import annotations
from typing imp... |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import Callable, Optional, Tuple
import torch
from botorch.posterior... |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Candidate generation utilities.
"""
from __future__ import annotations
import time
import warnings
from functo... |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from botorch.generation.gen import (
gen_candidates_scipy,
gen_candidates_torch,
get_best_candidates,
)
... |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from dataclasses import dataclass
from typing import Callable, Dict, List, Option... |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Sampling-based generation strategies.
A SamplingStrategy returns samples from the input points (i.e. Tensors in... |
#!/usr/bin/env python
import confu
parser = confu.standard_parser("cpuinfo configuration script")
parser.add_argument("--log", dest="log_level",
choices=("none", "fatal", "error", "warning", "info", "debug"), default="error")
parser.add_argument("--mock", dest="mock", action="store_true")
def main(args):
op... |
#!/usr/bin/env python
import os
import sys
import argparse
import shutil
parser = argparse.ArgumentParser(description='Android system files extractor')
parser.add_argument("-p", "--prefix", metavar="NAME", required=True,
help="Prefix for stored files, e.g. galaxy-s7-us")
SYSTEM_FILES = [
"/... |
#!/usr/bin/env python
from __future__ import print_function
import argparse
import sys
import re
parser = argparse.ArgumentParser(description='x86 CPUID dump parser')
parser.add_argument("input", metavar="INPUT", nargs=1,
help="Path to CPUID dump log")
def main(args):
options = parser.pars... |
#!/usr/bin/env python
import os
import sys
import string
import argparse
import subprocess
import tempfile
root_dir = os.path.abspath(os.path.dirname(__file__))
parser = argparse.ArgumentParser(description='Android system files extractor')
parser.add_argument("-p", "--prefix", metavar="NAME", required=True,
... |
#!/usr/bin/env python
import confu
parser = confu.standard_parser("clog configuration script")
def main(args):
options = parser.parse_args(args)
build = confu.Build.from_options(options)
build.export_cpath("include", ["clog.h"])
with build.options(source_dir="src", extra_include_dirs="src"):
... |
from typing import Dict, List, Optional, Tuple
import json
import math
from fairseq.data import Dictionary
import torch
import torchaudio
from torchaudio.pipelines import EMFORMER_RNNT_BASE_LIBRISPEECH
from torchaudio.models import Hypothesis
def get_hypo_tokens(hypo: Hypothesis) -> List[int]:
    """Return the token-id sequence stored as the first field of *hypo*."""
    tokens = hypo[0]
    return tokens
d... |
import torch
import torchaudio
from torch.utils.mobile_optimizer import optimize_for_mobile
def get_demo_wrapper():
    """Load and return the TorchScript demo wrapper serialized on disk."""
    return torch.jit.load("scripted_wrapper_tuple.pt")
wrapper = get_demo_wrapper()
scripted_model = torch.jit.script(wrapper)
optimized_model = optimize_for_mobile(scripted_model)... |
import pyaudio
import queue
import numpy as np
import torch
import torchaudio
def get_demo_wrapper():
    """Deserialize the pre-scripted demo wrapper module from the local file."""
    loaded_module = torch.jit.load("scripted_wrapper_tuple.pt")
    return loaded_module
wrapper = get_demo_wrapper()
################################################################
data_queue = queue.Queue()
def callba... |
import torch
import torch.utils.cpp_extension
print(torch.version.__version__)
op_source = """
#include <opencv2/opencv.hpp>
#include <torch/script.h>
torch::Tensor warp_perspective(torch::Tensor image, torch::Tensor warp) {
cv::Mat image_mat(/*rows=*/image.size(0),
/*cols=*/image.size(1),
... |
import torch
from torch.utils.mobile_optimizer import optimize_for_mobile
model = torch.hub.load('pytorch/vision:v0.11.0', 'deeplabv3_resnet50', pretrained=True)
model.eval()
scripted_module = torch.jit.script(model)
optimized_scripted_module = optimize_for_mobile(scripted_module)
# Export full jit version model (no... |
import torch
from torch import Tensor
from torch.utils.mobile_optimizer import optimize_for_mobile
import torchaudio
from torchaudio.models.wav2vec2.utils.import_huggingface import import_huggingface_model
from transformers import Wav2Vec2ForCTC
# Wav2vec2 model emits sequences of probability (logits) distributions ov... |
import torch
from pytorchvideo.accelerator.deployment.mobile_cpu.utils.model_conversion import (
convert_to_deployable_form,
)
from pytorchvideo.models.accelerator.mobile_cpu.efficient_x3d import EfficientX3d
from torch.hub import load_state_dict_from_url
from torch.utils.mobile_optimizer import (
optimize_for_... |
import torch
from transformers import DistilBertTokenizer, DistilBertForQuestionAnswering
from torch.utils.mobile_optimizer import optimize_for_mobile
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased-distilled-squad')
model = DistilBertForQuestionAnswering.from_pretrained('distilbert-base-uncas... |
# based on https://pytorch.org/tutorials/intermediate/seq2seq_translation_tutorial.html
from __future__ import unicode_literals, print_function, division
from io import open
import unicodedata
import string
import re
import random
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional a... |
import torch
from torch.utils.mobile_optimizer import optimize_for_mobile
model = torch.hub.load('facebookresearch/deit:main', 'deit_base_patch16_224', pretrained=True)
quantized_model = torch.quantization.quantize_dynamic(model, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8)
ts_model = torch.jit.script(quantized_... |
import torch
import torch.nn.functional as F
from torch import nn
from einops import rearrange
class Residual(nn.Module):
    """Residual (skip) connection: ``forward(x) == fn(x) + x``."""

    def __init__(self, fn):
        super().__init__()
        # The wrapped transformation applied on every forward pass.
        self.fn = fn

    def forward(self, x, **kwargs):
        transformed = self.fn(x, **kwargs)
        # Add the input back onto the transformed output.
        return transformed + x
class PreNorm(nn.Module):
def __init__... |
import torch
import torchvision
import time
from vit_pytorch import *
from torch.utils.mobile_optimizer import optimize_for_mobile
torch.manual_seed(42)
DOWNLOAD_PATH = 'data/mnist'
BATCH_SIZE_TRAIN = 100
BATCH_SIZE_TEST = 1000
# 0.1307 and 0.3081 are the mean and std computed on the MNIST training set
transform_mn... |
#!/usr/bin/env python3
import contextlib
import copy
import os
import unittest
from PIL import Image
import torch
from d2go.export.api import convert_and_export_predictor
from d2go.export.d2_meta_arch import patch_d2_meta_arch
from d2go.runner import create_runner, GeneralizedRCNNRunner
from d2go.model_zoo import mod... |
import torch
import torchvision
from torch.utils.mobile_optimizer import optimize_for_mobile
model = torchvision.models.mobilenet_v3_small(pretrained=True)
model.eval()
example = torch.rand(1, 3, 224, 224)
traced_script_module = torch.jit.trace(model, example)
optimized_traced_model = optimize_for_mobile(traced_script... |
'''
USAGE:
python create_csv.py
'''
import pandas as pd
import numpy as np
import os
import joblib
from sklearn.preprocessing import LabelBinarizer
from tqdm import tqdm
from imutils import paths
# get all the image paths
image_paths = list(paths.list_images('preprocessed_image'))
# create a DataFrame
data = pd.DataFr... |
'''
USAGE:
python preprocess_image.py --num-images 1200
'''
import os
import cv2
import random
import numpy as np
import argparse
from tqdm import tqdm
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--num-images', default=1200, type=int,
help='number of images to preprocess for each category')
args =... |
import torch.nn as nn
import torch.nn.functional as F
import joblib
# load the binarized labels
print('Loading label binarizer...')
lb = joblib.load('lb.pkl')
class CustomCNN(nn.Module):
def __init__(self):
super(CustomCNN, self).__init__()
self.conv1 = nn.Conv2d(3, 16, 5)
self.conv2 = nn.C... |
import torch
import joblib
import cnn_models
from torch.utils.mobile_optimizer import optimize_for_mobile
lb = joblib.load('lb.pkl')
model = cnn_models.CustomCNN()
model.load_state_dict(torch.load('asl.pth'))
scripted_module = torch.jit.script(model)
optimized_scripted_module = optimize_for_mobile(scripted_module)
op... |
'''
USAGE:
python test.py --img A_test.jpg
'''
import torch
import joblib
import torch.nn as nn
import numpy as np
import cv2
import argparse
import torchvision.transforms as transforms
import torch.nn.functional as F
import time
import cnn_models
from PIL import Image
# construct the argument parser and parse the arg... |
'''
USAGE:
python train.py --epochs 10
'''
import pandas as pd
import joblib
import numpy as np
import torch
import random
from PIL import Image
import matplotlib.pyplot as plt
import argparse
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
i... |
"""test_bench.py
Runs hub models in benchmark mode using pytest-benchmark. Run setup separately first.
Usage:
python install.py
pytest test_bench.py
See pytest-benchmark help (pytest test_bench.py -h) for additional options
e.g. --benchmark-autosave
--benchmark-compare
-k <filter expression>
...
""... |
import os
import traceback
import argparse
import importlib
from pathlib import Path
from typing import Dict
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
def list_benchmarks() -> Dict[str, str]:
benchmarks = {}
import userbenchmark
bdir = Path(userbenchmark.__file__).parent.resolve()
fb_b... |
"""
A lightweight runner that just sets up a model and runs one of its functions in a particular configuration.
Intended for debugging/exploration/profiling use cases, where the test/measurement harness is overhead.
DANGER: make sure to `python install.py` first or otherwise make sure the benchmark you are going to r... |
import os
import pytest
import torch
from torchbenchmark.util.machine_config import get_machine_config, check_machine_configured
def pytest_addoption(parser):
parser.addoption("--fuser", help="Use one of the available fusers: te, old, nvfuser", default="te", choices=["te", "old", "nvfuser"])
parser.addoption(... |
import time
import torch
import argparse
import json
from dataclasses import asdict
from torchbenchmark.e2e import E2EBenchmarkResult, load_e2e_model_by_name
from typing import Dict
SUPPORT_DEVICE_LIST = ["cpu", "cuda"]
def run(func) -> Dict[str, float]:
if torch.cuda.is_available():
torch.cuda.synchroniz... |
"""
A Benchmark Summary Metadata tool to extract and generate metadata from models at runtime.
"""
import argparse
from copy import deepcopy
import os
import yaml
from typing import Any, Dict, List, Tuple
import torch
from torchbenchmark import list_models, load_model_by_name, _list_model_paths, ModelTask, ModelDetail... |
"""test.py
Setup and Run hub models.
Make sure to enable an https proxy if necessary, or the setup steps may hang.
"""
# This file shows how to use the benchmark suite from user end.
import gc
import functools
import os
import traceback
import unittest
from unittest.mock import patch
import yaml
import torch
from tor... |
import argparse
import subprocess
import os
import sys
from utils import TORCH_DEPS, proxy_suggestion, get_pkg_versions, _test_https
from userbenchmark import list_userbenchmarks
from pathlib import Path
REPO_ROOT = Path(__file__).parent
def pip_install_requirements(requirements_txt="requirements.txt"):
if not _t... |
"""
The regression detector of TorchBench Userbenchmark.
"""
import json
import argparse
import importlib
from dataclasses import asdict
import os
import yaml
from pathlib import Path
import time
from datetime import datetime
from typing import Any, List, Dict, Optional
from userbenchmark.utils import PLATFORMS, USERBE... |
"""bisection.py
Runs bisection to determine PRs that trigger performance signals.
It assumes that the pytorch, torchbench, torchvision, and torchaudio repositories provided are all clean with the latest code.
By default, the torchaudio and torchvision packages will be fixed to the latest commit on the same pytorch comm... |
from enum import Enum
# Enum class for each Domain for the model and the respective tasks
# that is available in the domain.
class COMPUTER_VISION(Enum):
SEGMENTATION = "segmentation"
CLASSIFICATION = "classification"
DETECTION = "detection"
GENERATION = "generation"
PATTERN_RECOGNITION = "pattern ... |
import contextlib
import dataclasses
import gc
import importlib
import io
import os
import pathlib
import subprocess
import sys
import tempfile
import threading
from pathlib import Path
from typing import Any, Callable, Dict, List, NoReturn, Optional, Tuple
from urllib import request
import torch
from components._imp... |
import os
import pathlib
import importlib
from dataclasses import dataclass
from typing import List, Dict, Any
E2E_MODEL_DIR = 'e2e_models'
def _list_model_paths() -> List[str]:
p = pathlib.Path(__file__).parent.joinpath(E2E_MODEL_DIR)
return sorted(str(child.absolute()) for child in p.iterdir() if child.is_d... |
from torchbenchmark.util.framework.gnn.model_factory import GNNModel
from torchbenchmark.tasks import GNN
class Model(GNNModel):
task = GNN.CLASSIFICATION
DEFAULT_TRAIN_BSIZE = 64
DEFAULT_EVAL_BSIZE = 64
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(model_n... |
import subprocess
import sys
def pip_install_requirements():
    """Quietly pip-install requirements.txt, resolving PyG wheels from the
    index pinned to the torch-2.0.0 CPU build."""
    command = [
        sys.executable, '-m', 'pip', 'install', '-q',
        '-r', 'requirements.txt',
        '-f', 'https://data.pyg.org/whl/torch-2.0.0+cpu.html',
    ]
    subprocess.check_call(command)
if __name__ == '__main__':
    pip_install_requirements()
|
from torchbenchmark.tasks import COMPUTER_VISION
from torchbenchmark.util.framework.diffusers.model_factory import DiffuserModel
class Model(DiffuserModel):
task = COMPUTER_VISION.GENERATION
DEFAULT_TRAIN_BSIZE = 4
DEFAULT_EVAL_BSIZE = 1
# Default eval precision on CUDA device is fp16
DEFAULT_EVAL_... |
from torchbenchmark.util.framework.diffusers import install_diffusers
from diffusers import StableDiffusionInstructPix2PixPipeline
import torch
MODEL_NAME = "timbrooks/instruct-pix2pix"
def load_model_checkpoint():
StableDiffusionInstructPix2PixPipeline.from_pretrained(MODEL_NAME, torch_dtype=torch.float16, safet... |
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel, HuggingFaceAuthMixin
class Model(HuggingFaceModel, HuggingFaceAuthMixin):
task = NLP.LANGUAGE_MODELING
DEFAULT_TRAIN_BSIZE = 1
DEFAULT_EVAL_BSIZE = 1
DEEPCOPY = False
def __i... |
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
if __name__ == '__main__':
    # The model name is derived from this script's parent directory name —
    # the benchmark suite lays out one directory per HuggingFace model.
    model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
    cache_model(model_name)
from .. import lit_llama as lit_llama
from ..lit_llama import LIT_LLAMA_PATH
import importlib.util
import os.path
import torch.nn as nn
import sys
from lit_llama import Tokenizer
def import_from_file_path(module_name, file_path):
spec = importlib.util.spec_from_file_location(module_name, file_path)
module = im... |
from torchbenchmark.util.framework.lit_llama import install_lit_llama
if __name__ == '__main__':
    # Entry point: install the lit-llama benchmark dependencies.
    install_lit_llama()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the GNU General Public License version 3.
from PIL import Image
import numpy as np
import cv2
import torch
import os
from ...util.model import BenchmarkModel
from torchmultimodal.transforms.clip_t... |
import os
import subprocess
import sys
def pip_install_requirements():
    """Quietly pip-install everything listed in requirements.txt."""
    subprocess.check_call(
        [sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt']
    )
def download_data(data_folder):
# CC-0 image from wikipedia page on pizza so legal to use
subprocess.check_call(['wget', '-O', os.path.join... |
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import NLP
import torch
import os
from torchbenchmark import add_path, REPO_PATH
import sys
import lightning as L
LIT_LLAMA_PATH = os.path.join(REPO_PATH, "submodules", "lit-llama")
with add_path(LIT_LLAMA_PATH):
from lit_llama.utils import EmptyI... |
from torchbenchmark.util.framework.lit_llama import install_lit_llama
if __name__ == '__main__':
    # Entry point: install the lit-llama benchmark dependencies.
    install_lit_llama()
|
import dataclasses
from typing import List
def cfg_to_str(cfg: dataclasses.dataclass) -> List[str]:
def rewrite_option(opt: str) -> str:
new_opt = opt.replace("_", "-")
return f"--{new_opt}"
out = []
for fld in dataclasses.fields(cfg):
new_option = rewrite_option(fld.name)
v... |
import sys
from torch.optim.lr_scheduler import _LRScheduler
class LRPolicyScheduler(_LRScheduler):
def __init__(self, optimizer, num_warmup_steps, decay_start_step, num_decay_steps):
self.num_warmup_steps = num_warmup_steps
self.decay_start_step = decay_start_step
self.decay_end_step = dec... |
"""
Simplifed dlrm model from FAMBench
It doesn't support multiGPU or fbgemm_gpu.
"""
import torch
import sys
import os
import numpy as np
import torch.nn as nn
from torchbenchmark import REPO_PATH
from typing import Tuple, List
from torchbenchmark.util.model import BenchmarkModel
from torchbenchmark.tasks import RECOM... |
import torch.nn as nn
import torch
import sys
import numpy as np
import itertools
from torch._ops import ops
from torch.nn.parameter import Parameter
from torch.nn.parallel.replicate import replicate
from torch.nn.parallel.parallel_apply import parallel_apply
from torch.nn.parallel.scatter_gather import gather, scatter... |
import torch
# The following function is a wrapper to avoid checking this multiple times in th
# loop below.
def unpack_batch(b, device):
    """Unpack a DLRM batch tuple into its components plus sample weights.

    Args:
        b: Batch tuple ``(X, lS_o, lS_i, T, ...)``; only the first four
            entries are used.
        device: Target device for the generated sample-weight tensor.

    Returns:
        ``(X, lS_o, lS_i, T, weights, None)`` where ``weights`` is an
        all-ones tensor shaped like the targets ``T`` (unweighted samples)
        and the trailing ``None`` is kept for interface compatibility.
    """
    # Allocate the weights directly on the target device instead of
    # building them on CPU and copying over with .to(device).
    return b[0], b[1], b[2], b[3], torch.ones(b[3].size(), device=device), None
def dlrm_wrap(dlrm, X, lS_o, lS_i, use_gpu, device, ndevices=1)... |
# Currently, this file is not used, because torchbench doesn't support fbgemm embeddding yet;
# Note that FAMBench does support it.
import torch.nn as nn
import torch
import os
import sys
import numpy as np
from torchbenchmark import REPO_PATH
# This file assumes fbgemm_gpu is installed
import fbgemm_gpu
from fbgemm_gp... |
# Original source:
# https://github.com/facebookresearch/FAMBench/blob/a0f12ca4fe8973f4cc65d18b51ce3aa94ceec0ac/benchmarks/dlrm/ootb/dlrm_s_pytorch.py
import sys
import torch
import argparse
def dash_separated_ints(value):
vals = value.split("-")
for val in vals:
try:
int(val)
excep... |
import os
import sys
import torch
import subprocess
from torchbenchmark import REPO_PATH
def update_fambench_submodule():
"Update FAMBench submodule of the benchmark repo"
update_command = ["git", "submodule", "update",
"--init", "--recursive", os.path.join("submodules","FAMBench")]
... |
import torch
import sys
import numpy as np
# data generation
import dlrm_data_pytorch as dp
def prep_data(args):
ln_bot = np.fromstring(args.arch_mlp_bot, dtype=int, sep="-")
if args.data_generation == "dataset":
train_data, train_ld, test_data, test_ld = dp.make_criteo_data_and_loaders(args)
t... |
import torch
# OSS import
try:
# pyre-ignore[21]
# @manual=//ai_codesign/benchmarks/dlrm/torchrec_dlrm/data:dlrm_dataloader
from .data.dlrm_dataloader import get_dataloader
except ImportError:
pass
import itertools
import os
from pyre_extensions import none_throws
from torch import distributed as dis... |
import argparse
from enum import Enum
from typing import List
class InteractionType(Enum):
    """Closed set of feature-interaction variants; ``str()`` yields the raw
    string value of the member."""

    ORIGINAL = "original"
    DCN = "dcn"
    PROJECTION = "projection"

    def __str__(self):
        # Render as the bare value string (convenient for argparse output).
        return self.value
def parse_args(argv: List[str]) -> argparse.Namespace:
parser = argparse.ArgumentParser(description=... |
import subprocess
import sys
import os
from pathlib import Path
def pip_install_requirements():
    """Install this benchmark's requirements.txt via pip in quiet mode."""
    subprocess.check_call(
        [sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt']
    )
if __name__ == '__main__':
    pip_install_requirements()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
from typing import List
from torch import distributed as dist
from torch.utils.data import... |
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceGenerationModel
class Model(HuggingFaceGenerationModel):
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(name="hf_GPT2_generate", test=test, device=device, batch_size=batch_size, extra_args=ext... |
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import NLP
import torch
from ..lit_llama import LIT_LLAMA_PATH
import importlib.util
import os.path
import torch.nn as nn
import sys
from lit_llama.lora import mark_only_lora_as_trainable, lora, lora_state_dict
from torchbenchmark import REPO_PATH
LIT_... |
from torchbenchmark.util.framework.lit_llama import install_lit_llama
if __name__ == '__main__':
    # Entry point: install the lit-llama benchmark dependencies.
    install_lit_llama()
|
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel, HuggingFaceAuthMixin
class Model(HuggingFaceModel, HuggingFaceAuthMixin):
task = NLP.LANGUAGE_MODELING
DEFAULT_TRAIN_BSIZE = 1
DEFAULT_EVAL_BSIZE = 1
DEEPCOPY = False
def __i... |
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
if __name__ == '__main__':
    # The model name is derived from this script's parent directory name —
    # the benchmark suite lays out one directory per HuggingFace model.
    model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
    cache_model(model_name)
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel
class Model(HuggingFaceModel):
task = NLP.LANGUAGE_MODELING
# https://huggingface.co/mosaicml/mpt-7b
DEFAULT_TRAIN_BSIZE = 4
DEFAULT_EVAL_BSIZE = 1
def __init__(self, test, dev... |
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
if __name__ == '__main__':
    # Apply the local transformers patches before caching the checkpoint.
    patch_transformers()
    # Model name comes from this script's parent directory name.
    model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
    # NOTE(review): trust_remote_code=True executes code shipped in the model
    # repo at load time — confirm the model source is pinned/trusted.
    cache_model(model_name, trust_remote_code=True)
from torchbenchmark.util.framework.gnn.model_factory import GNNModel
from torchbenchmark.tasks import GNN
class Model(GNNModel):
task = GNN.CLASSIFICATION
DEFAULT_TRAIN_BSIZE = 64
DEFAULT_EVAL_BSIZE = 64
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(model_n... |
import subprocess
import sys
def pip_install_requirements():
    """Pip-install the benchmark requirements (quiet mode), using the PyG
    extra wheel index pinned to the torch-2.0.0 CPU build."""
    pip_args = [
        sys.executable, '-m', 'pip', 'install', '-q',
        '-r', 'requirements.txt',
        '-f', 'https://data.pyg.org/whl/torch-2.0.0+cpu.html',
    ]
    subprocess.check_call(pip_args)
if __name__ == '__main__':
    pip_install_requirements()
|
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel, HuggingFaceAuthMixin
class Model(HuggingFaceModel, HuggingFaceAuthMixin):
task = NLP.LANGUAGE_MODELING
DEFAULT_TRAIN_BSIZE = 1
DEFAULT_EVAL_BSIZE = 1
DEEPCOPY = False
def __i... |
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
if __name__ == '__main__':
    # The model name is derived from this script's parent directory name —
    # the benchmark suite lays out one directory per HuggingFace model.
    model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
    cache_model(model_name)
from torchbenchmark.util.framework.gnn.model_factory import GNNModel
from torchbenchmark.tasks import GNN
class Model(GNNModel):
task = GNN.CLASSIFICATION
DEFAULT_TRAIN_BSIZE = 64
DEFAULT_EVAL_BSIZE = 64
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(model_n... |
import subprocess
import sys
def pip_install_requirements():
    """Install requirements.txt quietly, resolving PyG wheels against the
    torch-2.0.0 CPU wheel index."""
    args = [sys.executable, '-m', 'pip', 'install', '-q',
            '-r', 'requirements.txt',
            '-f', 'https://data.pyg.org/whl/torch-2.0.0+cpu.html']
    subprocess.check_call(args)
if __name__ == '__main__':
    pip_install_requirements()
|
import torch
from typing import Optional, List
from contextlib import contextmanager, ExitStack
from typing import ContextManager
class PostInitProcessor(type):
    """Metaclass that calls ``__post__init__`` right after construction.

    Instances of classes built with this metaclass get a hook that runs
    once ``__init__`` has finished, similar to dataclass post-init.
    """

    def __call__(cls, *args, **kwargs):
        # Build the instance through the normal type machinery first...
        instance = super().__call__(*args, **kwargs)
        # ...then fire the post-construction hook before handing it out.
        instance.__post__init__()
        return instance
@contextmanager
def... |
import argparse
import enum
from typing import List, Optional, Tuple
from torchbenchmark.util.backends import list_backends, BACKENDS
from torchbenchmark.util.env_check import is_staged_train_test
TEST_STAGE = enum.Enum('TEST_STAGE', ['FORWARD', 'BACKWARD', 'OPTIMIZER', 'ALL'])
AVAILABLE_PRECISIONS = ["fp32", "tf32", ... |
import argparse
import re
import torch
from enum import Enum
class OpType(Enum):
POINTWISE = 1
NORMS = 2
REDUCTIONS = 3
VIEWS_EXPANDS = 4
REMOVE = 5
IGNORE = 6
op_types = {
"aten::rsqrt": OpType.POINTWISE,
"aten::abs": OpType.POINTWISE,
"aten::eq": OpType.POINTWISE,
"aten::gel... |
"""Utilities for tuning the machine for better benchmark stability.
Written for Amazon linux and Intel CPU, Nvidia GPU althogh many utilities will overlap.
"""
import argparse
import cpuinfo
import distro
import enum
import os
import platform
import psutil
import subprocess
import re
import sys
import typing
from path... |
import importlib
import os
import torch
from contextlib import contextmanager, ExitStack
import warnings
import inspect
import yaml
from pathlib import Path
from typing import ContextManager, Optional, List, Tuple, Generator
from torch.utils._pytree import tree_map
from torchbenchmark import REPO_PATH
from torchbenchma... |
"""
Return a list of recent PyTorch wheels published on download.pytorch.org.
Users can specify package name, python version, platform, and the number of days to return.
If one of the packages specified is missing on one day, the script will skip outputing the results on that day.
"""
import os
import re
import reques... |
"""
Utils for model metadata
"""
from typing import Any, List, Dict
def match_item(item_name: str, item_val: str, skip_item: Dict[str, Any]) -> bool:
    """Return True unless ``skip_item`` pins ``item_name`` to another value.

    A key missing from ``skip_item`` counts as a wildcard match; a present
    key matches only when its stored value equals ``item_val``.
    """
    return item_name not in skip_item or skip_item[item_name] == item_val
def skip_by_metadata(test: str, device:str, extra_args: List[str], m... |
def prefetch_loader(loader, device):
    """Materialize every batch of ``loader``, moving each element to ``device``.

    Each batch is assumed to be an iterable of tensor-like objects exposing
    ``.to(device)``; batches come back as tuples in their original order.
    """
    return [
        tuple(element.to(device) for element in batch)
        for batch in loader
    ]
"""
PyTorch benchmark env check utils.
This file may be loaded without torch packages installed, e.g., in OnDemand CI.
"""
import copy
import importlib
import os
import argparse
import logging
from contextlib import contextmanager, ExitStack
from typing import Any, Dict, List, Optional
MAIN_RANDOM_SEED = 1337
# rounds... |
import re
import torch
from torch.ao.quantization import QuantWrapper, get_default_qconfig_mapping, get_default_qconfig_propagation_list
from torch.ao.quantization.quantize_fx import _fuse_fx, prepare_fx, convert_fx
from torchbenchmark.util.env_check import is_hf_model
def _append_attr(fx_module, module, fx_white_list... |
import json
import os
import pandas as pd
import typing
class BenchmarkData:
def __init__(self):
self._benchmark_data = {}
self._machine_info = {}
self._commit_info = {}
self._names_all = set()
self._names_common = set()
self._tags = []
self._json_raw = []
... |
import os
import sys
import subprocess
import traceback
from pathlib import Path
from torchbenchmark import REPO_PATH
LIT_LLAMA_PATH = os.path.join(REPO_PATH, "submodules", "lit-llama")
def update_lit_llama_submodule():
update_command = ["git", "submodule", "update",
"--init", "--recursive",... |
import subprocess
import os
import sys
from pathlib import Path
CURRENT_DIR = Path(os.path.dirname(os.path.realpath(__file__)))
def pip_install_requirements():
requirements_file = os.path.join(CURRENT_DIR, "requirements.txt")
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', requirem... |
import torch
from torchbenchmark.util.model import BenchmarkModel
from diffusers import StableDiffusionInstructPix2PixPipeline, EulerAncestralDiscreteScheduler
from typing import Optional, List
class DiffuserModel(BenchmarkModel):
DIFFUSER_MODEL = True
def __init__(self, name: str, test: str, device: str, ba... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.