python_code
stringlengths
0
229k
import argparse import subprocess DEFAULT_PYTHON_VERSION = "3.10" PYTHON_VERSION_MAP = { "3.8": { "pytorch_url": "cp38", }, "3.10": { "pytorch_url": "cp310", }, "3.11": { "pytorch_url": "cp311", }, } def create_conda_env(pyver: str, name: str): command = [ "conda",...
import os import re import importlib import argparse import subprocess from pathlib import Path from typing import Optional # defines the default CUDA version to compile against DEFAULT_CUDA_VERSION = "11.8" CUDA_VERSION_MAP = { "11.8": { "pytorch_url": "cu118", "magma_version": "magma-cuda118"...
"""Add Task abstraction to reduce the friction of controlling a remote worker.""" import abc import ast import functools import inspect import marshal import textwrap import typing from components._impl.workers import base class TaskBase(abc.ABC): """Convenience layer to allow methods to be called in a worker. ...
import marshal import textwrap import typing from components._impl.workers import base class InProcessWorker(base.WorkerBase): """Worker which reuses the current Python process. The implementation of this worker borrows from the builtin `timeit.Timer` class, and simply reuses the current interpreter. (M...
import contextlib import datetime import io import os import marshal import pathlib import shutil import signal import subprocess import sys import tempfile import textwrap import time import typing from pathlib import Path import components from components._impl.workers import base from components._impl.workers impor...
"""Utilities to handle communication between parent worker. This module implements three principle facilities: 1) Raw IPC (via the Pipe class) 2) Exception propagation (via the SerializedException class) 3) A run loop for the worker (via the run_loop function) """ import contextlib import dataclasses impor...
import abc import ast import typing class WorkerBase(abc.ABC): """Interface for the core worker abstraction. Conceptually, a worker is modeled as a remote interactive Python terminal. One can send code to be executed (analogous to writing to stdin), and perform basic stores and loads (analogous to RP...
"""Unit tests specifically for the components of SubprocessWorker. End-to-end tests (e.g. does SubprocessWorker properly implement the WorkerBase API) still live in `test_worker`. """ import functools import os import sys import textwrap import threading import typing from torch.testing._internal.common_utils import...
import os import subprocess import sys import signal import textwrap import typing import torch from torch.testing._internal.common_utils import TestCase, run_tests try: from components._impl.workers import base as base_worker from components._impl.workers import in_process_worker from components._impl.wo...
""" This is a test file for TorchBenchAnalyzer """ from model_analyzer.TorchBenchAnalyzer import ModelAnalyzer def work(): # A simple mm test import torch n=4096 x = torch.ones((n, n), dtype=torch.float32, device="cuda") y = torch.ones((n, n),dtype=torch.float32, device="cuda") # configure mo...
from typing import Optional, OrderedDict, Tuple from .dcgm.cpu_monitor import CPUMonitor from .dcgm.dcgm_monitor import DCGMMonitor from .dcgm.nvml_monitor import NVMLMonitor from .tb_dcgm_types.da_exceptions import TorchBenchAnalyzerException from .tb_dcgm_types.gpu_device_factory import GPUDeviceFactory from .dcgm ...
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by a...
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by appli...
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by appl...
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by a...
import os import time from .monitor import Monitor import psutil from ..tb_dcgm_types.cpu_peak_memory import CPUPeakMemory class CPUMonitor(Monitor): """ A CPU monitor that uses psutil to monitor CPU usage """ def __init__(self, frequency, metrics_needed=[], monitored_pid=None): super().__init...
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by appli...
import time from .monitor import Monitor from ..tb_dcgm_types.gpu_free_memory import GPUFreeMemory from ..tb_dcgm_types.gpu_tensoractive import GPUTensorActive from ..tb_dcgm_types.gpu_peak_memory import GPUPeakMemory from ..tb_dcgm_types.gpu_utilization import GPUUtilization from ..tb_dcgm_types.gpu_power_usage import...
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by appli...
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by appli...
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by appli...
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under ...
from functools import total_ordering from .gpu_record import GPURecord @total_ordering class GPUPCIERX(GPURecord): """ GPU PCIe RX Bytes record. The number of bytes of active PCIe rx (read) data including both header and payload. Note that this is from the perspective of the GPU, so copying data from host...
import logging LOGGER_NAME = 'TorchBenchLogger' def set_logger(logger_level=logging.WARNING): formatter = logging.Formatter(fmt='%(asctime)s - %(levelname)s - %(module)s - %(message)s') handler = logging.StreamHandler() handler.setFormatter(formatter) logger = logging.getLogger(LOGGER_NAME) lo...
# Default sampling interval for monitoring, in seconds (10 ms).
DEFAULT_MONITORING_INTERVAL = 0.01


class AnalayzerConfig:
    """Configuration container for the analyzer.

    NOTE(review): the class name keeps the original 'Analayzer' spelling
    (sic) so that existing callers remain unaffected.
    """

    def __init__(self):
        # How often, in seconds, metrics should be sampled.
        self.monitoring_interval = DEFAULT_MONITORING_INTERVAL
from functools import total_ordering from .gpu_record import GPURecord @total_ordering class GPUPCIETX(GPURecord): """ GPU PCIe TX Bytes record. The number of bytes of active PCIe tx (transmit) data including both header and payload. Note that this is from the perspective of the GPU, so copying data from ...
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applic...
from .record import Record class CPURecord(Record): """ This is a base class for any CPU based record """ def __init__(self, value, timestamp=0): """ Parameters ---------- value : float The value of the CPU metrtic device_uuid : str ...
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applic...
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applic...
from functools import total_ordering from .cpu_record import CPURecord @total_ordering class CPUPeakMemory(CPURecord): """ The peak memory usage in the CPU. """ tag = "cpu_peak_memory" def __init__(self, value, timestamp=0): """ Parameters ---------- value : float...
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by a...
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applic...
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applic...
from functools import total_ordering from .gpu_record import GPURecord @total_ordering class GPUDRAMActive(GPURecord): """ GPU DRAM active record """ tag = "gpu_dramactive" def __init__(self, value, device_uuid=None, timestamp=0): """ Parameters ---------- value :...
class TorchBenchAnalyzerException(Exception):
    """Custom exception raised by the TorchBench Model Analyzer."""
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by a...
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applic...
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by a...
from functools import total_ordering from .gpu_record import GPURecord @total_ordering class GPUFP32Active(GPURecord): """ GPU FP32 active record """ tag = "gpu_fp32active" def __init__(self, value, device_uuid=None, timestamp=0): """ Parameters ---------- value :...
"""Scribe Uploader for Pytorch Benchmark Data Currently supports data in pytest-benchmark format but can be extended. New fields can be added just by modifying the schema in this file, schema checking is only here to encourage reusing existing fields and avoiding typos. """ import argparse import time import multipr...
import argparse import pathlib import yaml CORE_MODEL_PATH = pathlib.Path(__file__).parent.parent.absolute().joinpath("torchbenchmark", "models") def get_model_list(): models = list(map(lambda x: x.name, filter(lambda x: x.is_dir(), CORE_MODEL_PATH.iterdir()))) return models def check_csv_file(csv_file, know...
""" Dump the contents of a pytest benchmark .json file. """ import argparse import json from tabulate import tabulate def print_benchmark_stats(data): print_stats = ['min', 'max', 'mean', 'stddev', 'rounds', 'median'] headers = ['name'] + print_stats rows = [] for benchmark in data['benchmarks']: ...
import pathlib import torch from typing import Optional, List, Tuple from torchbenchmark import ModelTask import os import sys import time import numpy from components.model_analyzer.TorchBenchAnalyzer import ModelAnalyzer from run_sweep import WORKER_TIMEOUT, WARMUP_ROUNDS, ModelTestResult, NANOSECONDS_PER_MILLISECOND...
"""Scribe Uploader for Pytorch Benchmark V2 Data Currently supports data in pytest-benchmark format but can be extended. New fields can be added just by modifying the schema in this file, schema checking is only here to encourage reusing existing fields and avoiding typos. """ import argparse import time import mult...
"""Scribe Uploader for Pytorch Benchmark Data Currently supports userbenchmark result json file. """ import argparse import time import json import os import requests from collections import defaultdict from datetime import datetime def get_metrics_date_from_file(fname: str) -> str: bname = os.path.basename(fnam...
import argparse import sys from pathlib import Path REPO_ROOT = Path(__file__).parent.parent.parent.resolve() class add_path(): def __init__(self, path): self.path = path def __enter__(self): sys.path.insert(0, self.path) def __exit__(self, exc_type, exc_value, traceback): try: ...
""" This script reads from a PyTorch benchmarking directory and generates a yaml file that drives the bisector to run tests on the specified PyTorch commits. This only works on V1 and later benchmark, V0 is not supported. """ import os import re import git import json import yaml import argparse import dataclasses from...
import os import re import tabulate import argparse MAGIC_PREFIX = "STABLE_TEST_MODEL: " THRESHOLD = 7 def _parse_pr_body(body): magic_lines = list(filter(lambda x: MAGIC_PREFIX == x[:len(MAGIC_PREFIX)], body.splitlines())) if len(magic_lines): return magic_lines[-1][len(MAGIC_PREFIX):].strip() def _...
""" This script runs userbenchmarks abtest upon two PyTorch versions. """ import argparse import os import subprocess import shutil import sys import json from pathlib import Path from bmutils import REPO_ROOT, add_path from typing import Dict, Optional with add_path(REPO_ROOT): import torchbenchmark.util.gitutils...
""" Script that runs torchbench with a benchmarking config. The configs are located within the configs/ directory. For example, the default config we use is `torchdynamo/eager-overhead` """ import re import sys import os import yaml import argparse import subprocess import itertools from dataclasses import dataclass fr...
import argparse import os import json import yaml from pathlib import Path WORKFLOW_LINK_TEMPLATE = "https://github.com/pytorch/benchmark/actions/runs/" def check_env(bisection_root: str): "Check `bisection_root` contains bisection config file, github issue file, and result json." # gh-issue.md exists # r...
import sys from pathlib import Path CURRENT_DIR = Path(__file__).parent REPO_ROOT = str(CURRENT_DIR.parent.parent.parent) class add_path(): def __init__(self, path): self.path = path def __enter__(self): sys.path.insert(0, self.path) def __exit__(self, exc_type, exc_value, traceback): ...
import json import os import re from pathlib import Path import argparse ATTRIBUTES = ["batch_size", "precision"] def get_nonempty_json(d): r = [] for f in filter(lambda x: x.endswith(".json"), os.listdir(d)): fullpath = os.path.join(d, f) if os.stat(fullpath).st_size: r.append(ful...
import argparse import sys import subprocess from pathlib import Path from aicluster import run_aicluster_benchmark REPO_ROOT = Path(__file__).parent.parent.parent.parent.resolve() class add_path(): def __init__(self, path): self.path = path def __enter__(self): sys.path.insert(0, self.path)...
""" The script to upload TorchBench CI result from S3 to Scribe (Internal). To run this script, users need to set two environment variables: - AWS_ACCESS_KEY_ID - AWS_SECRET_ACCESS_KEY It assumes the following hierarchy of the result directory: torchbench-aicluster-metrics/ |-distributed |-metrics-20220805192500.js...
from pathlib import Path from typing import List CURRENT_DIR = Path(__file__).parent def list_userbenchmarks() -> List[str]: ub_dirs = [x for x in CURRENT_DIR.iterdir() if x.is_dir() and x.joinpath('__init__.py').exists() ] ub_names = list(map(lambda x: x.name, ub_dirs)) return ub_names def get_ci_from_u...
import os import sys import yaml from datetime import datetime, timedelta import time import json import yaml from pathlib import Path from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Callable REPO_PATH = Path(os.path.abspath(__file__)).parent.parent USERBENCHMARK_OUTPUT_PREFIX = ...
import itertools import time from datetime import datetime from typing import List import json import numpy as np import argparse import re import torch from ..utils import REPO_PATH, add_path, get_output_dir, get_output_json, dump_output with add_path(REPO_PATH): from torchbenchmark.util.experiment.instantiator ...
import itertools import time from datetime import datetime from typing import List import yaml import json import numpy as np import argparse from ..utils import REPO_PATH, add_path, get_output_dir, get_output_json, dump_output with add_path(REPO_PATH): from components._impl.workers.subprocess_rpc import Unserial...
import argparse import time import sys import subprocess from datetime import datetime from .result_analyzer import analyze from typing import List from ..utils import dump_output, get_output_dir, get_output_json, add_path, REPO_PATH with add_path(REPO_PATH): from utils.cuda_utils import DEFAULT_CUDA_VERSION, CUD...
from pathlib import Path import json import re def get_run(test_dir): run = {} testdir_name = test_dir.name regex = "cuda-(.*)-(.*)" g = re.match(regex, testdir_name).groups() run["test"] = g[0] run["cuda_version"] = g[1] eager_json = test_dir.joinpath("json", "eager.json") assert eager...
import argparse import os import yaml import time import shutil import itertools import subprocess from datetime import datetime from git import Repo from pathlib import Path from typing import List from ..utils import dump_output, get_output_dir, get_output_json from .result_analyzer import analyze # Expected WORK_DI...
from pathlib import Path import re import functools def is_userbenchmark_runscript(run_script_file): MAGIC_LINE = "# GENERATED BY userbenchmark/release-test/__init__.py. DO NOT EDIT!" with open(run_script_file, "r") as rsf: script = rsf.read() if MAGIC_LINE in script: return True return...
""" Run PyTorch cpu benchmarking. """ import argparse import itertools import os import subprocess import sys import time import yaml from datetime import datetime from pathlib import Path from typing import List from .cpu_utils import REPO_PATH, parse_str_to_list, validate, get_output_dir, get_output_json, dump_outpu...
""" Run PyTorch cpu benchmarking. """ import json import os import re import sys import time from datetime import datetime from pathlib import Path from typing import List, Dict, Optional REPO_PATH = Path(__file__).absolute().parent.parent.parent USERBENCHMARK_OUTPUT_PREFIX = ".userbenchmark" class add_path(): de...
""" Run PyTorch cpu benchmarking. """ import argparse import os import numpy from typing import List, Dict, Optional from pathlib import Path from cpu_utils import add_path, REPO_PATH, validate, parse_str_to_list, list_metrics, get_output_dir, get_output_json, dump_output with add_path(str(REPO_PATH)): from torc...
from typing import List import torch from torchbenchmark.util.distributed.submit import parse_args, get_init_file, TrainerWrapper from ..utils import dump_output BM_NAME = "distributed" def gen_metrics_from_result(result): assert isinstance(result, List), "The result should be a list." metrics = {} for re...
import sys
import subprocess


def pip_install_requirements():
    """Quietly install the packages listed in ./requirements.txt via pip.

    Runs pip through the current interpreter (sys.executable) so packages
    are installed into the active environment. Raises
    subprocess.CalledProcessError if the pip invocation fails.
    """
    command = [sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt']
    subprocess.check_call(command)


if __name__ == '__main__':
    pip_install_requirements()
import argparse import importlib import os import copy import csv import dataclasses import functools import io import json import multiprocessing import queue import submitit import time from datetime import datetime, timedelta import sys import torch import uuid import warnings from pathlib import Path from torch.di...
import csv import json import copy import argparse from typing import OrderedDict from dataclasses import dataclass import os import pickle from collections import defaultdict import tabulate import sys def parse_partial(args): """ Schema: model_data["model"]["backend"][#nodes] = result where "result" ...
""" Run PyTorch nightly benchmarking. """ import re import argparse import itertools import json import math import os import yaml import numpy from typing import List, Tuple, Dict, Optional, Any from ..utils import REPO_PATH, add_path, get_output_json, get_default_output_json_path from . import BM_NAME with add_path...
# Identifier for this userbenchmark; presumably consumed by the benchmark
# harness as the result/output key for the torch-nightly suite — TODO
# confirm against the callers that import BM_NAME.
BM_NAME = "torch-nightly"
from ..utils import TorchBenchABTestResult, TorchBenchABTestMetric from . import BM_NAME DEFAULT_REGRESSION_DELTA_THRESHOLD = 0.07 def run(control, treatment) -> TorchBenchABTestResult: control_env = control["environ"] control_env["git_commit_hash"] = control["environ"]["pytorch_git_version"] control_metr...
from pathlib import Path from typing import Any, Dict, List, Set, Tuple from torchbenchmark import load_model_by_name import torch from torch import _dynamo as torchdynamo from torch.optim import Adadelta, Adagrad, Adam, AdamW, Adamax, ASGD, SGD, RAdam, Rprop, RMSprop, NAdam, SparseAdam, LBFGS import torch.utils.benchm...
#!/bin/bash python3 ''' This script is intended for the CI context only! The whole purpose behind this script is to enable process/context/memory isolation across different models and devices. The OG script (which this script calls) is the userbenchmark/optim/run.py script, which is better documented and what is intend...
from typing import Optional from ..utils import TorchBenchABTestResult, TorchBenchABTestMetric DEFAULT_REGRESSION_DELTA_THRESHOLD = 0.3 COMPILE_TIME_REGRESSION_DELTA_THRESHOLD = 2.0 def run(control, treatment) -> Optional[TorchBenchABTestResult]: control_env = control["environ"] treatment_env = treatment["env...
import argparse from datetime import datetime import git import numpy as np import os import json import subprocess import sys import time import shutil from pathlib import Path from ..utils import dump_output, get_output_dir, get_output_json, REPO_PATH from typing import List BM_NAME = "instruction-count" RESULT_JSO...
import logging import warnings from .torchbench import setup_torchbench_cwd, TorchBenchmarkRunner try: from .common import main except ImportError: from common import main from typing import List def run(args: List[str]): original_dir = setup_torchbench_cwd() logging.basicConfig(level=logging.WARNING...
#!/usr/bin/env python3 import gc import importlib import logging import os import re import sys import warnings from os.path import abspath, exists import torch from .common import BenchmarkRunner, main from ._dynamo.testing import collect_results, reduce_to_scalar_loss from ._dynamo.utils import clone_inputs # We a...
#!/usr/bin/env python3 from __future__ import annotations import argparse import collections import contextlib import copy import csv import dataclasses import functools import importlib import itertools import logging import os import pathlib import random import shutil import signal import subprocess import sys impo...