python_code stringlengths 0 229k |
|---|
'''
USAGE:
python create_csv.py
'''
import pandas as pd
import numpy as np
import os
import joblib
from sklearn.preprocessing import LabelBinarizer
from tqdm import tqdm
from imutils import paths
# get all the image paths
image_paths = list(paths.list_images('preprocessed_image'))
# create a DataFrame
data = pd.DataFr... |
'''
USAGE:
python preprocess_image.py --num-images 1200
'''
import os
import cv2
import random
import numpy as np
import argparse
from tqdm import tqdm
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--num-images', default=1200, type=int,
help='number of images to preprocess for each category')
args =... |
import torch.nn as nn
import torch.nn.functional as F
import joblib
# load the binarized labels
print('Loading label binarizer...')
lb = joblib.load('lb.pkl')
class CustomCNN(nn.Module):
def __init__(self):
super(CustomCNN, self).__init__()
self.conv1 = nn.Conv2d(3, 16, 5)
self.conv2 = nn.C... |
import torch
import joblib
import cnn_models
from torch.utils.mobile_optimizer import optimize_for_mobile
lb = joblib.load('lb.pkl')
model = cnn_models.CustomCNN()
model.load_state_dict(torch.load('asl.pth'))
scripted_module = torch.jit.script(model)
optimized_scripted_module = optimize_for_mobile(scripted_module)
op... |
'''
USAGE:
python test.py --img A_test.jpg
'''
import torch
import joblib
import torch.nn as nn
import numpy as np
import cv2
import argparse
import torchvision.transforms as transforms
import torch.nn.functional as F
import time
import cnn_models
from PIL import Image
# construct the argument parser and parse the arg... |
'''
USAGE:
python train.py --epochs 10
'''
import pandas as pd
import joblib
import numpy as np
import torch
import random
from PIL import Image
import matplotlib.pyplot as plt
import argparse
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
i... |
"""test_bench.py
Runs hub models in benchmark mode using pytest-benchmark. Run setup separately first.
Usage:
python install.py
pytest test_bench.py
See pytest-benchmark help (pytest test_bench.py -h) for additional options
e.g. --benchmark-autosave
--benchmark-compare
-k <filter expression>
...
""... |
import os
import traceback
import argparse
import importlib
from pathlib import Path
from typing import Dict
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
def list_benchmarks() -> Dict[str, str]:
benchmarks = {}
import userbenchmark
bdir = Path(userbenchmark.__file__).parent.resolve()
fb_b... |
"""
A lightweight runner that just sets up a model and runs one of its functions in a particular configuration.
Intended for debugging/exploration/profiling use cases, where the test/measurement harness is overhead.
DANGER: make sure to `python install.py` first or otherwise make sure the benchmark you are going to r... |
import os
import pytest
import torch
from torchbenchmark.util.machine_config import get_machine_config, check_machine_configured
def pytest_addoption(parser):
parser.addoption("--fuser", help="Use one of the available fusers: te, old, nvfuser", default="te", choices=["te", "old", "nvfuser"])
parser.addoption(... |
import time
import torch
import argparse
import json
from dataclasses import asdict
from torchbenchmark.e2e import E2EBenchmarkResult, load_e2e_model_by_name
from typing import Dict
SUPPORT_DEVICE_LIST = ["cpu", "cuda"]
def run(func) -> Dict[str, float]:
if torch.cuda.is_available():
torch.cuda.synchroniz... |
"""
A Benchmark Summary Metadata tool to extract and generate metadata from models at runtime.
"""
import argparse
from copy import deepcopy
import os
import yaml
from typing import Any, Dict, List, Tuple
import torch
from torchbenchmark import list_models, load_model_by_name, _list_model_paths, ModelTask, ModelDetail... |
"""test.py
Setup and Run hub models.
Make sure to enable an https proxy if necessary, or the setup steps may hang.
"""
# This file shows how to use the benchmark suite from user end.
import gc
import functools
import os
import traceback
import unittest
from unittest.mock import patch
import yaml
import torch
from tor... |
import argparse
import subprocess
import os
import sys
from utils import TORCH_DEPS, proxy_suggestion, get_pkg_versions, _test_https
from userbenchmark import list_userbenchmarks
from pathlib import Path
REPO_ROOT = Path(__file__).parent
def pip_install_requirements(requirements_txt="requirements.txt"):
if not _t... |
"""
The regression detector of TorchBench Userbenchmark.
"""
import json
import argparse
import importlib
from dataclasses import asdict
import os
import yaml
from pathlib import Path
import time
from datetime import datetime
from typing import Any, List, Dict, Optional
from userbenchmark.utils import PLATFORMS, USERBE... |
"""bisection.py
Runs bisection to determine PRs that trigger performance signals.
It assumes that the pytorch, torchbench, torchvision, and torchaudio repositories provided are all clean with the latest code.
By default, the torchaudio and torchvision packages will be fixed to the latest commit on the same pytorch comm... |
from enum import Enum
# Enum class for each Domain for the model and the respective tasks
# that is available in the domain.
class COMPUTER_VISION(Enum):
SEGMENTATION = "segmentation"
CLASSIFICATION = "classification"
DETECTION = "detection"
GENERATION = "generation"
PATTERN_RECOGNITION = "pattern ... |
import contextlib
import dataclasses
import gc
import importlib
import io
import os
import pathlib
import subprocess
import sys
import tempfile
import threading
from pathlib import Path
from typing import Any, Callable, Dict, List, NoReturn, Optional, Tuple
from urllib import request
import torch
from components._imp... |
import os
import pathlib
import importlib
from dataclasses import dataclass
from typing import List, Dict, Any
E2E_MODEL_DIR = 'e2e_models'
def _list_model_paths() -> List[str]:
p = pathlib.Path(__file__).parent.joinpath(E2E_MODEL_DIR)
return sorted(str(child.absolute()) for child in p.iterdir() if child.is_d... |
from torchbenchmark.util.framework.gnn.model_factory import GNNModel
from torchbenchmark.tasks import GNN
class Model(GNNModel):
task = GNN.CLASSIFICATION
DEFAULT_TRAIN_BSIZE = 64
DEFAULT_EVAL_BSIZE = 64
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(model_n... |
import subprocess
import sys
def pip_install_requirements():
    """Quietly install requirements.txt, resolving PyG wheels from the torch-2.0.0+cpu index."""
    cmd = [
        sys.executable, '-m', 'pip', 'install', '-q',
        '-r', 'requirements.txt',
        '-f', 'https://data.pyg.org/whl/torch-2.0.0+cpu.html',
    ]
    subprocess.check_call(cmd)

if __name__ == '__main__':
    pip_install_requirements()
|
from torchbenchmark.tasks import COMPUTER_VISION
from torchbenchmark.util.framework.diffusers.model_factory import DiffuserModel
class Model(DiffuserModel):
task = COMPUTER_VISION.GENERATION
DEFAULT_TRAIN_BSIZE = 4
DEFAULT_EVAL_BSIZE = 1
# Default eval precision on CUDA device is fp16
DEFAULT_EVAL_... |
from torchbenchmark.util.framework.diffusers import install_diffusers
from diffusers import StableDiffusionInstructPix2PixPipeline
import torch
MODEL_NAME = "timbrooks/instruct-pix2pix"
def load_model_checkpoint():
StableDiffusionInstructPix2PixPipeline.from_pretrained(MODEL_NAME, torch_dtype=torch.float16, safet... |
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel, HuggingFaceAuthMixin
class Model(HuggingFaceModel, HuggingFaceAuthMixin):
task = NLP.LANGUAGE_MODELING
DEFAULT_TRAIN_BSIZE = 1
DEFAULT_EVAL_BSIZE = 1
DEEPCOPY = False
def __i... |
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
if __name__ == '__main__':
    # The directory name containing this script doubles as the model name to cache.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    model_name = os.path.basename(script_dir)
    cache_model(model_name)
from .. import lit_llama as lit_llama
from ..lit_llama import LIT_LLAMA_PATH
import importlib.util
import os.path
import torch.nn as nn
import sys
from lit_llama import Tokenizer
def import_from_file_path(module_name, file_path):
spec = importlib.util.spec_from_file_location(module_name, file_path)
module = im... |
from torchbenchmark.util.framework.lit_llama import install_lit_llama
# Script entry point: run the shared lit-llama installer for this benchmark.
if __name__ == '__main__':
    install_lit_llama()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the GNU General Public License version 3.
from PIL import Image
import numpy as np
import cv2
import torch
import os
from ...util.model import BenchmarkModel
from torchmultimodal.transforms.clip_t... |
import os
import subprocess
import sys
def pip_install_requirements():
    """Quietly pip-install this benchmark's requirements.txt using the current interpreter."""
    cmd = [sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt']
    subprocess.check_call(cmd)
def download_data(data_folder):
# CC-0 image from wikipedia page on pizza so legal to use
subprocess.check_call(['wget', '-O', os.path.join... |
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import NLP
import torch
import os
from torchbenchmark import add_path, REPO_PATH
import sys
import lightning as L
LIT_LLAMA_PATH = os.path.join(REPO_PATH, "submodules", "lit-llama")
with add_path(LIT_LLAMA_PATH):
from lit_llama.utils import EmptyI... |
from torchbenchmark.util.framework.lit_llama import install_lit_llama
# Script entry point: run the shared lit-llama installer for this benchmark.
if __name__ == '__main__':
    install_lit_llama()
|
import dataclasses
from typing import List
def cfg_to_str(cfg: dataclasses.dataclass) -> List[str]:
def rewrite_option(opt: str) -> str:
new_opt = opt.replace("_", "-")
return f"--{new_opt}"
out = []
for fld in dataclasses.fields(cfg):
new_option = rewrite_option(fld.name)
v... |
import sys
from torch.optim.lr_scheduler import _LRScheduler
class LRPolicyScheduler(_LRScheduler):
def __init__(self, optimizer, num_warmup_steps, decay_start_step, num_decay_steps):
self.num_warmup_steps = num_warmup_steps
self.decay_start_step = decay_start_step
self.decay_end_step = dec... |
"""
Simplifed dlrm model from FAMBench
It doesn't support multiGPU or fbgemm_gpu.
"""
import torch
import sys
import os
import numpy as np
import torch.nn as nn
from torchbenchmark import REPO_PATH
from typing import Tuple, List
from torchbenchmark.util.model import BenchmarkModel
from torchbenchmark.tasks import RECOM... |
import torch.nn as nn
import torch
import sys
import numpy as np
import itertools
from torch._ops import ops
from torch.nn.parameter import Parameter
from torch.nn.parallel.replicate import replicate
from torch.nn.parallel.parallel_apply import parallel_apply
from torch.nn.parallel.scatter_gather import gather, scatter... |
import torch
# The following function is a wrapper to avoid checking this multiple times in th
# loop below.
def unpack_batch(b, device):
    """Split a raw batch tuple into (X, lS_o, lS_i, T, W, CBPP).

    The first four positions of `b` pass through unchanged; sample weights W
    are all-ones on `device` (unweighted samples), and the last slot is None.
    """
    X, lS_o, lS_i, T = b[0], b[1], b[2], b[3]
    # Experiment with unweighted samples: weight every target equally.
    W = torch.ones(T.size()).to(device)
    return X, lS_o, lS_i, T, W, None
def dlrm_wrap(dlrm, X, lS_o, lS_i, use_gpu, device, ndevices=1)... |
# Currently, this file is not used, because torchbench doesn't support fbgemm embeddding yet;
# Note that FAMBench does support it.
import torch.nn as nn
import torch
import os
import sys
import numpy as np
from torchbenchmark import REPO_PATH
# This file assumes fbgemm_gpu is installed
import fbgemm_gpu
from fbgemm_gp... |
# Original source:
# https://github.com/facebookresearch/FAMBench/blob/a0f12ca4fe8973f4cc65d18b51ce3aa94ceec0ac/benchmarks/dlrm/ootb/dlrm_s_pytorch.py
import sys
import torch
import argparse
def dash_separated_ints(value):
vals = value.split("-")
for val in vals:
try:
int(val)
excep... |
import os
import sys
import torch
import subprocess
from torchbenchmark import REPO_PATH
def update_fambench_submodule():
"Update FAMBench submodule of the benchmark repo"
update_command = ["git", "submodule", "update",
"--init", "--recursive", os.path.join("submodules","FAMBench")]
... |
import torch
import sys
import numpy as np
# data generation
import dlrm_data_pytorch as dp
def prep_data(args):
ln_bot = np.fromstring(args.arch_mlp_bot, dtype=int, sep="-")
if args.data_generation == "dataset":
train_data, train_ld, test_data, test_ld = dp.make_criteo_data_and_loaders(args)
t... |
import torch
# OSS import
try:
# pyre-ignore[21]
# @manual=//ai_codesign/benchmarks/dlrm/torchrec_dlrm/data:dlrm_dataloader
from .data.dlrm_dataloader import get_dataloader
except ImportError:
pass
import itertools
import os
from pyre_extensions import none_throws
from torch import distributed as dis... |
import argparse
from enum import Enum
from typing import List
class InteractionType(Enum):
    """Closed set of feature-interaction modes selectable from the CLI."""
    ORIGINAL = "original"
    DCN = "dcn"
    PROJECTION = "projection"
    def __str__(self) -> str:
        # Plain string value so argparse/printing shows e.g. "dcn", not "InteractionType.DCN".
        return self.value
def parse_args(argv: List[str]) -> argparse.Namespace:
parser = argparse.ArgumentParser(description=... |
import subprocess
import sys
import os
from pathlib import Path
def pip_install_requirements():
    """Quietly pip-install requirements.txt with the running interpreter."""
    command = [sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt']
    subprocess.check_call(command)

if __name__ == '__main__':
    pip_install_requirements()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
from typing import List
from torch import distributed as dist
from torch.utils.data import... |
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceGenerationModel
class Model(HuggingFaceGenerationModel):
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(name="hf_GPT2_generate", test=test, device=device, batch_size=batch_size, extra_args=ext... |
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import NLP
import torch
from ..lit_llama import LIT_LLAMA_PATH
import importlib.util
import os.path
import torch.nn as nn
import sys
from lit_llama.lora import mark_only_lora_as_trainable, lora, lora_state_dict
from torchbenchmark import REPO_PATH
LIT_... |
from torchbenchmark.util.framework.lit_llama import install_lit_llama
# Script entry point: run the shared lit-llama installer for this benchmark.
if __name__ == '__main__':
    install_lit_llama()
|
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel, HuggingFaceAuthMixin
class Model(HuggingFaceModel, HuggingFaceAuthMixin):
task = NLP.LANGUAGE_MODELING
DEFAULT_TRAIN_BSIZE = 1
DEFAULT_EVAL_BSIZE = 1
DEEPCOPY = False
def __i... |
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
if __name__ == '__main__':
    # The directory name containing this script doubles as the model name to cache.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    model_name = os.path.basename(script_dir)
    cache_model(model_name)
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel
class Model(HuggingFaceModel):
task = NLP.LANGUAGE_MODELING
# https://huggingface.co/mosaicml/mpt-7b
DEFAULT_TRAIN_BSIZE = 4
DEFAULT_EVAL_BSIZE = 1
def __init__(self, test, dev... |
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
if __name__ == '__main__':
    patch_transformers()
    # The directory name containing this script doubles as the model name to cache.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    model_name = os.path.basename(script_dir)
    cache_model(model_name, trust_remote_code=True)
from torchbenchmark.util.framework.gnn.model_factory import GNNModel
from torchbenchmark.tasks import GNN
class Model(GNNModel):
task = GNN.CLASSIFICATION
DEFAULT_TRAIN_BSIZE = 64
DEFAULT_EVAL_BSIZE = 64
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(model_n... |
import subprocess
import sys
def pip_install_requirements():
    """Quietly install requirements.txt, resolving PyG wheels from the torch-2.0.0+cpu index."""
    pip_cmd = [
        sys.executable, '-m', 'pip', 'install', '-q',
        '-r', 'requirements.txt',
        '-f', 'https://data.pyg.org/whl/torch-2.0.0+cpu.html',
    ]
    subprocess.check_call(pip_cmd)

if __name__ == '__main__':
    pip_install_requirements()
|
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel, HuggingFaceAuthMixin
class Model(HuggingFaceModel, HuggingFaceAuthMixin):
task = NLP.LANGUAGE_MODELING
DEFAULT_TRAIN_BSIZE = 1
DEFAULT_EVAL_BSIZE = 1
DEEPCOPY = False
def __i... |
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
if __name__ == '__main__':
    # The directory name containing this script doubles as the model name to cache.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    model_name = os.path.basename(script_dir)
    cache_model(model_name)
from torchbenchmark.util.framework.gnn.model_factory import GNNModel
from torchbenchmark.tasks import GNN
class Model(GNNModel):
task = GNN.CLASSIFICATION
DEFAULT_TRAIN_BSIZE = 64
DEFAULT_EVAL_BSIZE = 64
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(model_n... |
import subprocess
import sys
def pip_install_requirements():
    """Quietly install requirements.txt, resolving PyG wheels from the torch-2.0.0+cpu index."""
    install_cmd = [
        sys.executable, '-m', 'pip', 'install', '-q',
        '-r', 'requirements.txt',
        '-f', 'https://data.pyg.org/whl/torch-2.0.0+cpu.html',
    ]
    subprocess.check_call(install_cmd)

if __name__ == '__main__':
    pip_install_requirements()
|
import torch
from typing import Optional, List
from contextlib import contextmanager, ExitStack
from typing import ContextManager
class PostInitProcessor(type):
    """Metaclass that runs obj.__post__init__() immediately after normal construction."""
    def __call__(cls, *args, **kwargs):
        # Build the instance the usual way, then fire the post-init hook.
        instance = super().__call__(*args, **kwargs)
        instance.__post__init__()
        return instance
@contextmanager
def... |
import argparse
import enum
from typing import List, Optional, Tuple
from torchbenchmark.util.backends import list_backends, BACKENDS
from torchbenchmark.util.env_check import is_staged_train_test
TEST_STAGE = enum.Enum('TEST_STAGE', ['FORWARD', 'BACKWARD', 'OPTIMIZER', 'ALL'])
AVAILABLE_PRECISIONS = ["fp32", "tf32", ... |
import argparse
import re
import torch
from enum import Enum
class OpType(Enum):
    """Coarse categories for classifying ATen ops in the op_types lookup table."""
    POINTWISE = 1
    NORMS = 2
    REDUCTIONS = 3
    VIEWS_EXPANDS = 4
    REMOVE = 5
    IGNORE = 6
op_types = {
"aten::rsqrt": OpType.POINTWISE,
"aten::abs": OpType.POINTWISE,
"aten::eq": OpType.POINTWISE,
"aten::gel... |
"""Utilities for tuning the machine for better benchmark stability.
Written for Amazon linux and Intel CPU, Nvidia GPU althogh many utilities will overlap.
"""
import argparse
import cpuinfo
import distro
import enum
import os
import platform
import psutil
import subprocess
import re
import sys
import typing
from path... |
import importlib
import os
import torch
from contextlib import contextmanager, ExitStack
import warnings
import inspect
import yaml
from pathlib import Path
from typing import ContextManager, Optional, List, Tuple, Generator
from torch.utils._pytree import tree_map
from torchbenchmark import REPO_PATH
from torchbenchma... |
"""
Return a list of recent PyTorch wheels published on download.pytorch.org.
Users can specify package name, python version, platform, and the number of days to return.
If one of the packages specified is missing on one day, the script will skip outputing the results on that day.
"""
import os
import re
import reques... |
"""
Utils for model metadata
"""
from typing import Any, List, Dict
def match_item(item_name: str, item_val: str, skip_item: Dict[str, Any]) -> bool:
    """Return True if skip_item has no entry for item_name, or its entry equals item_val."""
    try:
        return skip_item[item_name] == item_val
    except KeyError:
        # No constraint recorded for this item -> it trivially matches.
        return True
def skip_by_metadata(test: str, device:str, extra_args: List[str], m... |
def prefetch_loader(loader, device):
    """Materialize an entire data loader onto `device`.

    Iterates `loader` once, moves every item of every batch to `device`
    via its `.to(device)` method, and returns the batches as a list of
    tuples (one tuple per batch, same order as the loader yields them).
    NOTE(review): this holds every batch on the device at once — fine for
    benchmark-sized datasets, not for arbitrarily large ones.
    """
    # Nested comprehension replaces the manual append loop (same order, same tuples).
    return [tuple(item.to(device) for item in data) for data in loader]
"""
PyTorch benchmark env check utils.
This file may be loaded without torch packages installed, e.g., in OnDemand CI.
"""
import copy
import importlib
import os
import argparse
import logging
from contextlib import contextmanager, ExitStack
from typing import Any, Dict, List, Optional
MAIN_RANDOM_SEED = 1337
# rounds... |
import re
import torch
from torch.ao.quantization import QuantWrapper, get_default_qconfig_mapping, get_default_qconfig_propagation_list
from torch.ao.quantization.quantize_fx import _fuse_fx, prepare_fx, convert_fx
from torchbenchmark.util.env_check import is_hf_model
def _append_attr(fx_module, module, fx_white_list... |
import json
import os
import pandas as pd
import typing
class BenchmarkData:
def __init__(self):
self._benchmark_data = {}
self._machine_info = {}
self._commit_info = {}
self._names_all = set()
self._names_common = set()
self._tags = []
self._json_raw = []
... |
import os
import sys
import subprocess
import traceback
from pathlib import Path
from torchbenchmark import REPO_PATH
LIT_LLAMA_PATH = os.path.join(REPO_PATH, "submodules", "lit-llama")
def update_lit_llama_submodule():
update_command = ["git", "submodule", "update",
"--init", "--recursive",... |
import subprocess
import os
import sys
from pathlib import Path
CURRENT_DIR = Path(os.path.dirname(os.path.realpath(__file__)))
def pip_install_requirements():
requirements_file = os.path.join(CURRENT_DIR, "requirements.txt")
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', requirem... |
import torch
from torchbenchmark.util.model import BenchmarkModel
from diffusers import StableDiffusionInstructPix2PixPipeline, EulerAncestralDiscreteScheduler
from typing import Optional, List
class DiffuserModel(BenchmarkModel):
DIFFUSER_MODEL = True
def __init__(self, name: str, test: str, device: str, ba... |
import torch
from typing import Tuple
def enable_cudagraph(model: 'torchbenchmark.util.model.BenchmarkModel', example_inputs: Tuple[torch.tensor]):
optimizer = model.optimizer
loss_fn = model.loss_fn
# warmup
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.str... |
import os
import torch
import typing
import torch.optim as optim
import torchvision.models as models
from contextlib import nullcontext
from torchbenchmark.util.model import BenchmarkModel
from typing import Tuple, Generator, Optional
class TorchVisionModel(BenchmarkModel):
# To recognize this is a torchvision mod... |
import argparse
def parse_tb_args(args):
    """Parse torchbench-specific flags; return (known_namespace, unknown_args_list)."""
    tb_parser = argparse.ArgumentParser()
    tb_parser.add_argument(
        "--graph_type",
        choices=["dense", "sparse"],
        default="dense",
        help="Determine dense graph or sparse graph",
    )
    # parse_known_args leaves unrecognized flags for downstream parsers.
    return tb_parser.parse_known_args(args)
|
import subprocess
import os.path
import sys
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
def install_pytorch_geometric():
    # Thin alias: PyG dependencies are installed via the shared requirements helper below.
    pip_install_requirements()
def pip_install_requirements():
requirements_file = os.path.join(CURRENT_DIR, "requirements.txt")
subprocess.check_call([sys.executable, '-m', ... |
import torch
import sys
import typing
from contextlib import nullcontext
from torchbenchmark.util.model import BenchmarkModel
import torch_geometric
from torch_geometric.nn import GAT, GCN, GraphSAGE, GIN, EdgeCNN
from torchbenchmark.tasks import GNN
import torch.nn.functional as F
from tqdm import tqdm
from pathlib i... |
""" Hacked from https://github.com/rwightman/pytorch-image-models/blob/f7d210d759beb00a3d0834a3ce2d93f6e17f3d38/train.py
ImageNet Training Script
This is intended to be a lean and easily modifiable ImageNet training script that reproduces ImageNet
training results with some of the latest networks and training techniq... |
""" Hacked from https://github.com/rwightman/pytorch-image-models/blob/f7d210d759beb00a3d0834a3ce2d93f6e17f3d38/train.py
ImageNet Training Script
This is intended to be a lean and easily modifiable ImageNet training script that reproduces ImageNet
training results with some of the latest networks and training techniq... |
""" Hacked from https://github.com/rwightman/pytorch-image-models/blob/f7d210d759beb00a3d0834a3ce2d93f6e17f3d38/train.py
ImageNet Training Script
This is intended to be a lean and easily modifiable ImageNet training script that reproduces ImageNet
training results with some of the latest networks and training techniq... |
""" Hacked from https://github.com/rwightman/pytorch-image-models/blob/f7d210d759beb00a3d0834a3ce2d93f6e17f3d38/train.py
ImageNet Training Script
This is intended to be a lean and easily modifiable ImageNet training script that reproduces ImageNet
training results with some of the latest networks and training techniq... |
import torch.nn as nn
import dataclasses
from timm.optim import create_optimizer
@dataclasses.dataclass
class OptimizerOption:
lr: float
opt: str
weight_decay: float
momentum: float
class TimmConfig:
def __init__(self, model, device):
self.model = model
self.device = device
... |
from contextlib import suppress
import torch
import typing
import timm
from torchbenchmark.util.model import BenchmarkModel
from .timm_config import TimmConfig
from typing import Generator, Tuple, Optional
class TimmModel(BenchmarkModel):
# To recognize this is a timm model
TIMM_MODEL = True
# These two va... |
from datasets import load_dataset
def prep_dataset(hf_args):
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be do... |
"""
Hacked from https://github.com/huggingface/transformers/blob/main/examples/pytorch/translation/run_translation_no_trainer.py
It runs HuggingFace transformer models translation on WMT16
"""
import argparse
from transformers import SchedulerType
task_to_keys = {
# hf args to include for different tasks
# en... |
from datasets import load_dataset
from transformers import PretrainedConfig
from .args import task_to_keys
def preprocess_dataset(hf_args, config, model, tokenizer, raw_datasets, num_labels, label_list, is_regression, accelerator):
# Preprocessing the raw_datasets
if hf_args.task_name is not None:
sen... |
"""
Hacked from https://github.com/huggingface/transformers/blob/6fc38adff272ea3148e05888edf67eeb00170453/examples/pytorch/text-classification/run_glue.py
It runs HuggingFace transformer models on the GLUE benchmark
"""
import argparse
from transformers import SchedulerType
task_to_keys = {
"cola": ("sentence", No... |
import argparse
def parse_tb_args(args):
parser = argparse.ArgumentParser()
# default resolution: 800x1333
parser.add_argument("--resize", choices=["default", "448x608"], default="default", help="Resize the image to specified size")
args, unknown_args = parser.parse_known_args(args)
return args, un... |
import os
import shutil
import sys
import subprocess
from pathlib import Path
from urllib import request
from utils import s3_utils
CURRENT_DIR = Path(os.path.dirname(os.path.realpath(__file__)))
# Load pre-trained weights
# copied from https://github.com/facebookresearch/detectron2/blob/5934a1452801e669bbf9479ae222ce... |
from torchbenchmark.util.framework.detectron2.config import parse_tb_args
from torchbenchmark.util.model import BenchmarkModel
import itertools
import os
from pathlib import Path
import torch
# setup environment variable
CURRENT_DIR = Path(os.path.dirname(os.path.realpath(__file__)))
DATA_DIR = os.path.join(CURRENT_DI... |
"""
Patch the transformer source code to enable optimizations.
"""
import os
import subprocess
import sys
from .model_factory import class_models
from transformers import AutoConfig, ReformerConfig, BigBirdConfig, BertConfig, WhisperConfig, LlamaConfig
PATCH_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__... |
import argparse
import torch
from torchbenchmark.util.model import BenchmarkModel
from typing import List, Dict, Tuple
def add_bool_arg(parser: argparse.ArgumentParser, name: str, default_value: bool=True):
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('--' + name, dest=name, a... |
import math
import random
import os
import torch
from contextlib import nullcontext
from torch import optim
import torch.nn as nn
from torchbenchmark.util.model import BenchmarkModel
from torchbenchmark.tasks import NLP
import transformers
from transformers import AutoConfig, ReformerConfig, BertConfig, GenerationConfi... |
import argparse
import importlib
import os
import sys
import torch
import uuid
from pathlib import Path
from typing import List
try:
import submitit
except ImportError:
submitit = None
def parse_args(args: List[str]=None):
parser = argparse.ArgumentParser(description='PyTorch Distributed Benchmark', add_... |
from datetime import datetime
import os
from pathlib import Path
from statistics import stdev
import torch
from torch.cuda import Event
from torch.profiler import profile, ProfilerActivity, tensorboard_trace_handler
from torchbenchmark.util.e2emodel import E2EBenchmarkModel, nested
import torch.distributed as dist
cl... |
from datetime import datetime
import os
from pathlib import Path
from statistics import stdev
from typing import Optional
import numpy as np
import torch
from torch.cuda import Event
from torch.profiler import profile, ProfilerActivity, schedule, tensorboard_trace_handler
from torchbenchmark.util.env_check import same... |
from io import UnsupportedOperation
import os
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
def apply_trainer(model, trainer):
local_rank = int(os.getenv("LOCAL_RANK", -1))
if trainer == "ddp" or trainer == "ddp_no... |
import torch
import argparse
from torchbenchmark.util.backends import create_backend
from typing import List
def parse_torchscript_args(args: List[str]) -> argparse.Namespace:
parser = argparse.ArgumentParser()
# enable ofi by default
parser.add_argument("--no-ofi", action='store_true', help="disable opti... |
import os
import argparse
import torch
from torchbenchmark.util.backends import create_backend
from typing import List, Tuple
try:
from fx2ait.acc_tracer import acc_tracer
from fx2ait.ait_module import AITModule
from fx2ait.fx2ait import AITInterpreter
except:
# if fx2ait is not available, skip it.
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.