python_code stringlengths 0 258k |
|---|
"""
Compute the benchmark score given a frozen score configuration and current benchmark data.
"""
import argparse
import json
import math
import sys
import os
import re
import yaml
import importlib
from tabulate import tabulate
from pathlib import Path
from collections import defaultdict
from .generate_score_config ... |
from accelerate.utils.dataclasses import DeepSpeedPlugin
import torch
import math
import os
from pathlib import Path
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.utils.data import DataLoader
from torchbenchmark.util.e2emodel ... |
import subprocess
import sys
def pip_install_requirements():
    """Quietly install this benchmark's Python dependencies via pip.

    NOTE(review): 'requirements.txt' is resolved against the current working
    directory, not this file's directory — callers are expected to chdir
    into the benchmark folder first (confirm against the harness).
    """
    install_cmd = [sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt']
    subprocess.check_call(install_cmd)


if __name__ == '__main__':
    pip_install_requirements()
|
import torch
import math
import os
from pathlib import Path
from torch.utils.data import DataLoader
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import NLP
from accelerate import Accelerator
from transformers import (
AdamW,
AutoConfig,
AutoModelForSequenceClassification,
AutoToken... |
from accelerate.utils.dataclasses import DeepSpeedPlugin
import functools
import torch
import numpy as np
import math
import os
from pathlib import Path
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.wrap impor... |
import subprocess
import sys
def pip_install_requirements():
    """Install the packages listed in requirements.txt using pip's quiet mode.

    NOTE(review): assumes the process CWD is the directory containing
    requirements.txt — verify the caller sets it.
    """
    cmd = [sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt']
    subprocess.check_call(cmd)


if __name__ == '__main__':
    pip_install_requirements()
|
# upstream repo: https://github.com/kuangliu/pytorch-cifar
import torch
import torchvision
import torchvision.transforms as transforms
from torchbenchmark.util.e2emodel import E2EBenchmarkModel
from torchbenchmark.tasks import COMPUTER_VISION
import os
from tqdm import tqdm
from pathlib import Path
# setup environmen... |
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(
in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
... |
import os
import sys
import torch
import subprocess
from pathlib import Path
from dataclasses import dataclass
from torchbenchmark.util.e2emodel import E2EBenchmarkModel
from typing import Optional, List
CURRENT_DIR = Path(os.path.dirname(os.path.realpath(__file__)))
FAMBENCH_ROOT = CURRENT_DIR.parent.parent.parent.... |
import sys
import subprocess
def pip_install_requirements():
    """Run `pip install -q -r requirements.txt` with the current interpreter.

    Raises subprocess.CalledProcessError if the install fails.
    """
    args = [sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt']
    subprocess.check_call(args)


if __name__ == '__main__':
    pip_install_requirements()
|
import importlib
import sys
from urllib import request
from typing import List, Dict
TORCH_DEPS = ['torch', 'torchvision', 'torchtext', 'torchaudio']
proxy_suggestion = "Unable to verify https connectivity, " \
"required for setup.\n" \
"Do you need to use a proxy?"
class add_pa... |
from typing import Any, List, Optional
import boto3
import os
import json
from pathlib import Path
USERBENCHMARK_S3_BUCKET = "ossci-metrics"
USERBENCHMARK_S3_OBJECT = "torchbench-userbenchmark"
class S3Client:
def __init__(self, bucket, object):
self.s3 = boto3.client('s3')
self.bucket = bucket
... |
import argparse
import subprocess
DEFAULT_PYTHON_VERSION = "3.10"
PYTHON_VERSION_MAP = {
"3.8": {
"pytorch_url": "cp38",
},
"3.10": {
"pytorch_url": "cp310",
},
}
def create_conda_env(pyver: str, name: str):
command = [ "conda", "create", "-n", name, "-y", f"python={pyver}" ]
... |
import os
import re
import importlib
import argparse
import subprocess
from pathlib import Path
from typing import Optional
# defines the default CUDA version to compile against
DEFAULT_CUDA_VERSION = "11.7"
CUDA_VERSION_MAP = {
"11.6": {
"pytorch_url": "cu116",
"magma_version": "magma-cuda116"... |
"""Add Task abstraction to reduce the friction of controlling a remote worker."""
import abc
import ast
import functools
import inspect
import marshal
import textwrap
import typing
from components._impl.workers import base
class TaskBase(abc.ABC):
"""Convenience layer to allow methods to be called in a worker.
... |
import marshal
import textwrap
import typing
from components._impl.workers import base
class InProcessWorker(base.WorkerBase):
"""Worker which reuses the current Python process.
The implementation of this worker borrows from the builtin `timeit.Timer`
class, and simply reuses the current interpreter. (M... |
import contextlib
import datetime
import io
import os
import marshal
import pathlib
import shutil
import signal
import subprocess
import sys
import tempfile
import textwrap
import time
import typing
from pathlib import Path
import components
from components._impl.workers import base
from components._impl.workers impor... |
"""Utilities to handle communication between parent worker.
This module implements three principle facilities:
1) Raw IPC (via the Pipe class)
2) Exception propagation (via the SerializedException class)
3) A run loop for the worker (via the run_loop function)
"""
import contextlib
import dataclasses
impor... |
import abc
import ast
import typing
class WorkerBase(abc.ABC):
"""Interface for the core worker abstraction.
Conceptually, a worker is modeled as a remote interactive Python terminal.
One can send code to be executed (analogous to writing to stdin), and
perform basic stores and loads (analogous to RP... |
"""Unit tests specifically for the components of SubprocessWorker.
End-to-end tests (e.g. does SubprocessWorker properly implement the
WorkerBase API) still live in `test_worker`.
"""
import functools
import os
import sys
import textwrap
import threading
import typing
from torch.testing._internal.common_utils import... |
import os
import subprocess
import sys
import signal
import textwrap
import typing
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
try:
from components._impl.workers import base as base_worker
from components._impl.workers import in_process_worker
from components._impl.wo... |
"""
This is a test file for TorchBenchAnalyzer
"""
from model_analyzer.TorchBenchAnalyzer import ModelAnalyzer
def work():
# A simple mm test
import torch
n=4096
x = torch.ones((n, n), dtype=torch.float32, device="cuda")
y = torch.ones((n, n),dtype=torch.float32, device="cuda")
# configure mo... |
from typing import Optional, OrderedDict, Tuple
from .dcgm.cpu_monitor import CPUMonitor
from .dcgm.dcgm_monitor import DCGMMonitor
from .dcgm.nvml_monitor import NVMLMonitor
from .tb_dcgm_types.da_exceptions import TorchBenchAnalyzerException
from .tb_dcgm_types.gpu_device_factory import GPUDeviceFactory
from .dcgm ... |
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by a... |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appli... |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appl... |
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by a... |
import os
import time
from .monitor import Monitor
import psutil
from ..tb_dcgm_types.cpu_peak_memory import CPUPeakMemory
class CPUMonitor(Monitor):
"""
A CPU monitor that uses psutil to monitor CPU usage
"""
def __init__(self, frequency, metrics_needed=[], monitored_pid=None):
super().__init... |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appli... |
import time
from .monitor import Monitor
from ..tb_dcgm_types.gpu_free_memory import GPUFreeMemory
from ..tb_dcgm_types.gpu_tensoractive import GPUTensorActive
from ..tb_dcgm_types.gpu_peak_memory import GPUPeakMemory
from ..tb_dcgm_types.gpu_utilization import GPUUtilization
from ..tb_dcgm_types.gpu_power_usage import... |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appli... |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appli... |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appli... |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under ... |
from functools import total_ordering
from .gpu_record import GPURecord
@total_ordering
class GPUPCIERX(GPURecord):
"""
GPU PCIe RX Bytes record. The number of bytes of active PCIe rx (read) data including both header and payload.
Note that this is from the perspective of the GPU, so copying data from host... |
import logging
LOGGER_NAME = 'TorchBenchLogger'
def set_logger(logger_level=logging.WARNING):
formatter = logging.Formatter(fmt='%(asctime)s - %(levelname)s - %(module)s - %(message)s')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger = logging.getLogger(LOGGER_NAME)
lo... |
# Default monitor sampling interval, in seconds.
DEFAULT_MONITORING_INTERVAL = 0.01


class AnalayzerConfig:
    """Configuration holder for the model analyzer.

    NOTE(review): the class name keeps the original 'Analayzer' spelling
    because external callers import it under this name.
    """

    def __init__(self):
        # How often (seconds) the monitors sample their metrics.
        self.monitoring_interval = DEFAULT_MONITORING_INTERVAL
from functools import total_ordering
from .gpu_record import GPURecord
@total_ordering
class GPUPCIETX(GPURecord):
"""
GPU PCIe TX Bytes record. The number of bytes of active PCIe tx (transmit) data including both header and payload.
Note that this is from the perspective of the GPU, so copying data from ... |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applic... |
from .record import Record
class CPURecord(Record):
"""
This is a base class for any
CPU based record
"""
def __init__(self, value, timestamp=0):
"""
Parameters
----------
value : float
The value of the CPU metrtic
device_uuid : str
... |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applic... |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applic... |
from functools import total_ordering
from .cpu_record import CPURecord
@total_ordering
class CPUPeakMemory(CPURecord):
"""
The peak memory usage in the CPU.
"""
tag = "cpu_peak_memory"
def __init__(self, value, timestamp=0):
"""
Parameters
----------
value : float... |
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by a... |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applic... |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applic... |
from functools import total_ordering
from .gpu_record import GPURecord
@total_ordering
class GPUDRAMActive(GPURecord):
"""
GPU DRAM active record
"""
tag = "gpu_dramactive"
def __init__(self, value, device_uuid=None, timestamp=0):
"""
Parameters
----------
value :... |
class TorchBenchAnalyzerException(Exception):
    """Custom exception raised by the TorchBench Model Analyzer."""
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by a... |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applic... |
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by a... |
from functools import total_ordering
from .gpu_record import GPURecord
@total_ordering
class GPUFP32Active(GPURecord):
"""
GPU FP32 active record
"""
tag = "gpu_fp32active"
def __init__(self, value, device_uuid=None, timestamp=0):
"""
Parameters
----------
value :... |
"""Scribe Uploader for Pytorch Benchmark Data
Currently supports data in pytest-benchmark format but can be extended.
New fields can be added just by modifying the schema in this file, schema
checking is only here to encourage reusing existing fields and avoiding typos.
"""
import argparse
import time
import multipr... |
import os
import re
import requests
from bs4 import BeautifulSoup
from collections import defaultdict
from datetime import datetime, date, timedelta
from pathlib import Path
torch_wheel_cuda_version = "cu113"
torch_wheel_python_version = "cp38"
torch_wheel_platform = "linux_x86_64"
torch_wheel_nightly_base = f"https:/... |
import argparse
import pathlib
import yaml
CORE_MODEL_PATH = pathlib.Path(__file__).parent.parent.absolute().joinpath("torchbenchmark", "models")
def get_model_list():
    """Return the names of all core model directories under CORE_MODEL_PATH."""
    return [entry.name for entry in CORE_MODEL_PATH.iterdir() if entry.is_dir()]
def check_csv_file(csv_file, know... |
"""
Dump the contents of a pytest benchmark .json file.
"""
import argparse
import json
from tabulate import tabulate
def print_benchmark_stats(data):
print_stats = ['min', 'max', 'mean', 'stddev', 'rounds', 'median']
headers = ['name'] + print_stats
rows = []
for benchmark in data['benchmarks']:
... |
import pathlib
import torch
from typing import Optional, List, Tuple
from torchbenchmark import ModelTask
import os
import sys
import time
import numpy
from components.model_analyzer.TorchBenchAnalyzer import ModelAnalyzer
from run_sweep import WORKER_TIMEOUT, WARMUP_ROUNDS, ModelTestResult, NANOSECONDS_PER_MILLISECOND... |
"""Scribe Uploader for Pytorch Benchmark V2 Data
Currently supports data in pytest-benchmark format but can be extended.
New fields can be added just by modifying the schema in this file, schema
checking is only here to encourage reusing existing fields and avoiding typos.
"""
import argparse
import time
import mult... |
"""Scribe Uploader for Pytorch Benchmark Data
Currently supports userbenchmark result json file.
"""
import argparse
import time
import json
import os
import requests
from collections import defaultdict
from datetime import datetime
def get_metrics_date_from_file(fname: str) -> str:
bname = os.path.basename(fnam... |
import argparse
import json
import sys
from pathlib import Path
from datetime import datetime
REPO_ROOT = Path(__file__).parent.parent.parent.resolve()
class add_path():
def __init__(self, path):
self.path = path
def __enter__(self):
sys.path.insert(0, self.path)
def __exit__(self, exc_ty... |
"""
This script reads from a PyTorch benchmarking directory and generates a yaml file
that drives the bisector to run tests on the specified PyTorch commits.
This only works on V1 and later benchmark, V0 is not supported.
"""
import os
import re
import git
import json
import yaml
import argparse
import dataclasses
from... |
import os
import re
import tabulate
import argparse
MAGIC_PREFIX = "STABLE_TEST_MODEL: "
THRESHOLD = 7
def _parse_pr_body(body):
magic_lines = list(filter(lambda x: MAGIC_PREFIX == x[:len(MAGIC_PREFIX)], body.splitlines()))
if len(magic_lines):
return magic_lines[-1][len(MAGIC_PREFIX):].strip()
def _... |
"""
This script runs userbenchmarks abtest upon two PyTorch versions.
"""
import argparse
import os
import subprocess
import shutil
import sys
import json
from pathlib import Path
from bmutils import REPO_ROOT, add_path
from typing import Dict, Optional
with add_path(REPO_ROOT):
import torchbenchmark.util.gitutils... |
"""
Script that runs torchbench with a benchmarking config.
The configs are located within the configs/ directory.
For example, the default config we use is `torchdynamo/eager-overhead`
"""
import re
import sys
import os
import yaml
import argparse
import subprocess
import itertools
from dataclasses import dataclass
fr... |
import argparse
import os
import json
import yaml
from pathlib import Path
WORKFLOW_LINK_TEMPLATE = "https://github.com/pytorch/benchmark/actions/runs/"
def check_env(bisection_root: str):
"Check `bisection_root` contains bisection config file, github issue file, and result json."
# gh-issue.md exists
# r... |
import sys
from pathlib import Path
CURRENT_DIR = Path(__file__).parent
REPO_ROOT = str(CURRENT_DIR.parent.parent.parent)
class add_path():
def __init__(self, path):
self.path = path
def __enter__(self):
sys.path.insert(0, self.path)
def __exit__(self, exc_type, exc_value, traceback):
... |
import json
import os
import re
from pathlib import Path
import argparse
ATTRIBUTES = ["batch_size", "precision"]
def get_nonempty_json(d):
r = []
for f in filter(lambda x: x.endswith(".json"), os.listdir(d)):
fullpath = os.path.join(d, f)
if os.stat(fullpath).st_size:
r.append(ful... |
import argparse
import sys
import subprocess
from pathlib import Path
from aicluster import run_aicluster_benchmark
REPO_ROOT = Path(__file__).parent.parent.parent.parent.resolve()
class add_path():
def __init__(self, path):
self.path = path
def __enter__(self):
sys.path.insert(0, self.path)... |
"""
The script to upload TorchBench CI result from S3 to Scribe (Internal).
To run this script, users need to set two environment variables:
- AWS_ACCESS_KEY_ID
- AWS_SECRET_ACCESS_KEY
It assumes the following hierarchy of the result directory:
torchbench-aicluster-metrics/
|-distributed
|-metrics-20220805192500.js... |
import yaml
from pathlib import Path
CURRENT_DIR = Path(__file__).parent
def list_userbenchmarks():
    """Return the names of userbenchmark packages: subdirectories of
    CURRENT_DIR that contain an __init__.py file."""
    ub_names = []
    for entry in CURRENT_DIR.iterdir():
        if entry.is_dir() and entry.joinpath('__init__.py').exists():
            ub_names.append(entry.name)
    return ub_names
def get_ci_from_ub(ub_name):
ci_file =... |
import os
import sys
from datetime import datetime, timedelta
import time
import json
from pathlib import Path
from dataclasses import dataclass
from typing import Any, Dict, List, Optional
REPO_PATH = Path(os.path.abspath(__file__)).parent.parent
USERBENCHMARK_OUTPUT_PREFIX = ".userbenchmark"
PLATFORMS = [
"gcp_... |
import itertools
import time
from datetime import datetime
from typing import List
import yaml
import json
import numpy as np
import argparse
from ..utils import REPO_PATH, add_path, get_output_dir, get_output_json, dump_output
with add_path(REPO_PATH):
from components._impl.workers.subprocess_rpc import Unserial... |
from pathlib import Path
import json
import re
def get_run(test_dir):
run = {}
testdir_name = test_dir.name
regex = "cuda-(.*)-(.*)"
g = re.match(regex, testdir_name).groups()
run["test"] = g[0]
run["cuda_version"] = g[1]
eager_json = test_dir.joinpath("json", "eager.json")
assert eager... |
import argparse
import time
import sys
import subprocess
from datetime import datetime
from .result_analyzer import analyze
from typing import List
from ..utils import dump_output, get_output_dir, get_output_json, add_path, REPO_PATH
with add_path(REPO_PATH):
from utils.cuda_utils import DEFAULT_CUDA_VERSION, CUD... |
from pathlib import Path
import re
import functools
def is_userbenchmark_runscript(run_script_file):
MAGIC_LINE = "# GENERATED BY userbenchmark/release-test/__init__.py. DO NOT EDIT!"
with open(run_script_file, "r") as rsf:
script = rsf.read()
if MAGIC_LINE in script:
return True
return... |
import argparse
import os
import yaml
import time
import shutil
import itertools
import subprocess
from datetime import datetime
from git import Repo
from pathlib import Path
from typing import List
from ..utils import dump_output, get_output_dir, get_output_json
from .result_analyzer import analyze
# Expected WORK_DI... |
"""
Run PyTorch cpu benchmarking.
"""
import json
import os
import re
import sys
import time
from datetime import datetime
from pathlib import Path
REPO_PATH = Path(__file__).absolute().parent.parent.parent
USERBENCHMARK_OUTPUT_PREFIX = ".userbenchmark"
class add_path():
def __init__(self, path):
self.pat... |
"""
Run PyTorch cpu benchmarking.
"""
import argparse
import itertools
import os
import subprocess
import sys
import time
import yaml
from datetime import datetime
from pathlib import Path
from typing import List
from .cpu_utils import REPO_PATH, get_output_dir, get_output_json, dump_output, analyze
from ..utils impor... |
"""
Run PyTorch cpu benchmarking.
"""
import argparse
import os
import numpy
from typing import List, Dict, Optional
from pathlib import Path
from cpu_utils import add_path, REPO_PATH, get_output_dir, get_output_json, dump_output
with add_path(str(REPO_PATH)):
from torchbenchmark.util.experiment.instantiator imp... |
from typing import List
import submitit
import torch
from torchbenchmark.util.distributed.submit import parse_args, get_init_file, TrainerWrapper
from ..utils import dump_output
BM_NAME = "distributed"
def gen_metrics_from_result(result):
assert isinstance(result, List), "The result of submitit should be a list."... |
import csv
import json
import copy
import argparse
from typing import OrderedDict
from dataclasses import dataclass
import os
import pickle
from collections import defaultdict
import tabulate
import sys
def parse_partial(args):
"""
Schema:
model_data["model"]["backend"][#nodes] = result
where "result" ... |
import argparse
import importlib
import os
import copy
import csv
import dataclasses
import functools
import io
import json
import multiprocessing
import queue
import submitit
import time
from datetime import datetime, timedelta
import sys
import torch
import uuid
import warnings
from pathlib import Path
from torch.di... |
"""
Run PyTorch nightly benchmarking.
"""
import argparse
import itertools
import json
import math
import os
import yaml
import numpy
from typing import List, Tuple, Dict, Optional, Any
from ..utils import REPO_PATH, add_path, get_output_json, dump_output
with add_path(REPO_PATH):
from torchbenchmark.util.experim... |
from typing import Optional
from ..utils import TorchBenchABTestResult, TorchBenchABTestMetric
DEFAULT_REGRESSION_DELTA_THRESHOLD = 0.07
def run(control, treatment) -> Optional[TorchBenchABTestResult]:
control_env = control["environ"]
treatment_env = treatment["environ"]
control_metrics = control["metrics... |
from pathlib import Path
from typing import Any, Dict, List, Tuple
from torchbenchmark import load_model_by_name
import torch
from torch import _dynamo as torchdynamo
from torch.optim import Adadelta, Adagrad, Adam, AdamW, Adamax, ASGD, SGD, RAdam, Rprop, RMSprop, NAdam, SparseAdam, LBFGS
import torch.utils.benchmark a... |
#!/bin/bash python3
'''
This script is intended for the CI context only! The whole purpose behind this script is to enable
process/context/memory isolation across different models and devices. The OG script (which this
script calls) is the userbenchmark/optim/__init__.py script, which is better documented and what is
i... |
from typing import Optional
from ..utils import TorchBenchABTestResult, TorchBenchABTestMetric
DEFAULT_REGRESSION_DELTA_THRESHOLD = 0.07
def run(control, treatment) -> Optional[TorchBenchABTestResult]:
control_env = control["environ"]
treatment_env = treatment["environ"]
control_metrics = control["metrics... |
import argparse
from datetime import datetime
import git
import numpy as np
import os
import json
import subprocess
import sys
import time
import shutil
from pathlib import Path
from ..utils import dump_output, get_output_dir, get_output_json, REPO_PATH
from typing import List
BM_NAME = "instruction-count"
RESULT_JSO... |
import argparse
import csv
import functools
import gc
import io
import itertools
import logging
import numpy as np
import os
import re
import sys
import time
import torch
from torch import nn
from torch.jit import fuser, optimized_execution
from os.path import abspath
from scipy.stats import ttest_ind
import importlib
... |
"""
Test user-customized invoke function.
"""
import argparse
from typing import List
from ..utils import REPO_PATH, add_path, get_output_json, dump_output
with add_path(REPO_PATH):
from torchbenchmark.util.experiment.instantiator import list_models, load_model_isolated, TorchBenchModelConfig, \
... |
import torch
import torch.nn as nn
from functorch import vmap, jacfwd, jacrev
from .util import BenchmarkCase
# batched hessians of fully connected layers is a popular quantity
# in physics-related models.
# This test case is from https://github.com/pytorch/functorch/issues/989
# We haven't been able to get the full m... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.