python_code stringlengths 0 229k |
|---|
from .utils import *
|
#!/usr/bin/env python3
IGNORE_ID = -1
def pad_list(xs, pad_value):
# From: espnet/src/nets/e2e_asr_th.py: pad_list()
n_batch = len(xs)
max_len = max(x.size(0) for x in xs)
pad = xs[0].new(n_batch, max_len, * xs[0].size()[1:]).fill_(pad_value)
for i in range(n_batch):
pad[i, :xs[i].size(0)]... |
from .data import *
|
"""
Logic:
1. AudioDataLoader generate a minibatch from AudioDataset, the size of this
minibatch is AudioDataLoader's batchsize. For now, we always set
AudioDataLoader's batchsize as 1. The real minibatch size we care about is
set in AudioDataset's __init__(...). So actually, we generate the
information of... |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the GNU General Public License version 3.
from typing import List
import torch
from .tokenizer import Tokenizer
from .model import Transformer
class LLaMA:
def __init__(self, model: Transf... |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the GNU General Public License version 3.
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import NLP
import torch
from .model import ModelArgs, Transformer
import torch
class... |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the GNU General Public License version 3.
from typing import Optional, Tuple
from dataclasses import dataclass
import math
import torch
from torch import nn
import torch.nn.functional as F
@dat... |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the GNU General Public License version 3.
from sentencepiece import SentencePieceProcessor
from logging import getLogger
from typing import List
import os
logger = getLogger()
class Tokenizer:... |
import subprocess
import sys
def pip_install_requirements():
    """Install the packages listed in requirements.txt quietly via pip.

    Uses sys.executable so the packages are installed into the environment
    of the interpreter running this script.
    """
    cmd = [sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt']
    subprocess.check_call(cmd)
if __name__ == '__main__':
    pip_install_requirements()
from torchbenchmark.util.framework.timm.model_factory import TimmModel
from torchbenchmark.tasks import COMPUTER_VISION
class Model(TimmModel):
task = COMPUTER_VISION.CLASSIFICATION
DEFAULT_TRAIN_BSIZE = 32
DEFAULT_EVAL_BSIZE = 32
def __init__(self, test, device, batch_size=None, extra_args=[]):
... |
"""
Generate a fully specified benchmark configuration file, given a lightweight
specification and a complete source of benchmark data.
Specification File
------------------
Score hierarchy input intended to be as easy to construct as possible,
relying on automatic inference of unspecified weights, benchmark configs,
... |
"""
Compute TorchBench Score V2.
"""
import re
import math
import yaml
import importlib
import itertools
from pathlib import Path
from typing import List, Optional
TORCHBENCH_V2_REF_DATA = Path(__file__).parent.joinpath("configs/v2/config-v2.yaml")
TORCHBENCH_V2_DEFAULT_THRESHOLD = 0.07
TORCHBENCH_V2_DEFAULT_TARGET = ... |
"""
Compute the benchmark score given a frozen score configuration and current benchmark data.
"""
import argparse
import json
import math
import sys
import os
import re
import yaml
import importlib
from tabulate import tabulate
from pathlib import Path
from collections import defaultdict
TARGET_SCORE_DEFAULT = 1000
... |
"""
Compute the benchmark score given a frozen score configuration and current benchmark data.
"""
import argparse
import json
import math
import sys
import os
import re
import yaml
import importlib
from enum import Enum
from tabulate import tabulate
from pathlib import Path
from collections import defaultdict
from ty... |
"""
Compute the benchmark score given a frozen score configuration and current benchmark data.
"""
import argparse
import json
import math
import sys
import os
import re
import yaml
import importlib
from tabulate import tabulate
from pathlib import Path
from collections import defaultdict
from .generate_score_config ... |
from accelerate.utils.dataclasses import DeepSpeedPlugin
import torch
import math
import os
from pathlib import Path
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.utils.data import DataLoader
from torchbenchmark.util.e2emodel ... |
import subprocess
import sys
def pip_install_requirements():
    """Quietly install the dependencies pinned in requirements.txt."""
    # Use the pip of the interpreter running this script (sys.executable)
    # so packages land in the correct environment; -q keeps CI logs clean.
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
    pip_install_requirements()
|
import torch
import math
import os
from pathlib import Path
from torch.utils.data import DataLoader
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import NLP
from accelerate import Accelerator
from transformers import (
AdamW,
AutoConfig,
AutoModelForSequenceClassification,
AutoToken... |
from accelerate.utils.dataclasses import DeepSpeedPlugin
import functools
import torch
import numpy as np
import math
import os
from pathlib import Path
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.wrap impor... |
import subprocess
import sys
def pip_install_requirements():
    """Run ``pip install -q -r requirements.txt`` with the current interpreter."""
    pip_invocation = [sys.executable, '-m', 'pip', 'install']
    subprocess.check_call(pip_invocation + ['-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
    pip_install_requirements()
|
# upstream repo: https://github.com/kuangliu/pytorch-cifar
import torch
import torchvision
import torchvision.transforms as transforms
from torchbenchmark.util.e2emodel import E2EBenchmarkModel
from torchbenchmark.tasks import COMPUTER_VISION
import os
from tqdm import tqdm
from pathlib import Path
# setup environmen... |
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(
in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
... |
import os
import sys
import torch
import subprocess
from pathlib import Path
from dataclasses import dataclass
from torchbenchmark.util.e2emodel import E2EBenchmarkModel
from typing import Optional, List
CURRENT_DIR = Path(os.path.dirname(os.path.realpath(__file__)))
FAMBENCH_ROOT = CURRENT_DIR.parent.parent.parent.... |
import sys
import subprocess
def pip_install_requirements():
    """Quietly install the dependencies pinned in requirements.txt."""
    # sys.executable ensures pip targets this script's own environment.
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
    pip_install_requirements()
|
import importlib
import sys
from urllib import request
from typing import List, Dict
TORCH_DEPS = ['torch', 'torchvision', 'torchaudio']
proxy_suggestion = "Unable to verify https connectivity, " \
"required for setup.\n" \
"Do you need to use a proxy?"
class add_path():
def... |
"""gitutils.py
Utils for getting git-related information.
"""
import git
import re
import os
import time
import subprocess
from datetime import datetime
from typing import Optional, List
# Assume the nightly branch commit message is in the following format
# Hash in the parentheses links to the commit on the master ... |
from typing import Any, List, Optional
import boto3
import os
import json
import yaml
from pathlib import Path
USERBENCHMARK_S3_BUCKET = "ossci-metrics"
USERBENCHMARK_S3_OBJECT = "torchbench-userbenchmark"
REPO_ROOT = Path(__file__).parent.parent
class S3Client:
def __init__(self, bucket, object):
self.s3... |
"""
Utilities for building pytorch and torch* domain packages
"""
import os
import sys
import shutil
import subprocess
from dataclasses import dataclass
from pathlib import Path
from typing import List, Dict
CLEANUP_ROUND = 5
@dataclass
class TorchRepo:
name: str
origin_url: str
main_branch: str
src_p... |
import argparse
import subprocess
DEFAULT_PYTHON_VERSION = "3.10"
PYTHON_VERSION_MAP = {
"3.8": {
"pytorch_url": "cp38",
},
"3.10": {
"pytorch_url": "cp310",
},
"3.11": {
"pytorch_url": "cp311",
},
}
def create_conda_env(pyver: str, name: str):
command = [ "conda",... |
import os
import re
import importlib
import argparse
import subprocess
from pathlib import Path
from typing import Optional
# defines the default CUDA version to compile against
DEFAULT_CUDA_VERSION = "11.8"
CUDA_VERSION_MAP = {
"11.8": {
"pytorch_url": "cu118",
"magma_version": "magma-cuda118"... |
"""Add Task abstraction to reduce the friction of controlling a remote worker."""
import abc
import ast
import functools
import inspect
import marshal
import textwrap
import typing
from components._impl.workers import base
class TaskBase(abc.ABC):
"""Convenience layer to allow methods to be called in a worker.
... |
import marshal
import textwrap
import typing
from components._impl.workers import base
class InProcessWorker(base.WorkerBase):
"""Worker which reuses the current Python process.
The implementation of this worker borrows from the builtin `timeit.Timer`
class, and simply reuses the current interpreter. (M... |
import contextlib
import datetime
import io
import os
import marshal
import pathlib
import shutil
import signal
import subprocess
import sys
import tempfile
import textwrap
import time
import typing
from pathlib import Path
import components
from components._impl.workers import base
from components._impl.workers impor... |
"""Utilities to handle communication between parent worker.
This module implements three principle facilities:
1) Raw IPC (via the Pipe class)
2) Exception propagation (via the SerializedException class)
3) A run loop for the worker (via the run_loop function)
"""
import contextlib
import dataclasses
impor... |
import abc
import ast
import typing
class WorkerBase(abc.ABC):
"""Interface for the core worker abstraction.
Conceptually, a worker is modeled as a remote interactive Python terminal.
One can send code to be executed (analogous to writing to stdin), and
perform basic stores and loads (analogous to RP... |
"""Unit tests specifically for the components of SubprocessWorker.
End-to-end tests (e.g. does SubprocessWorker properly implement the
WorkerBase API) still live in `test_worker`.
"""
import functools
import os
import sys
import textwrap
import threading
import typing
from torch.testing._internal.common_utils import... |
import os
import subprocess
import sys
import signal
import textwrap
import typing
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
try:
from components._impl.workers import base as base_worker
from components._impl.workers import in_process_worker
from components._impl.wo... |
"""
This is a test file for TorchBenchAnalyzer
"""
from model_analyzer.TorchBenchAnalyzer import ModelAnalyzer
def work():
# A simple mm test
import torch
n=4096
x = torch.ones((n, n), dtype=torch.float32, device="cuda")
y = torch.ones((n, n),dtype=torch.float32, device="cuda")
# configure mo... |
from typing import Optional, OrderedDict, Tuple
from .dcgm.cpu_monitor import CPUMonitor
from .dcgm.dcgm_monitor import DCGMMonitor
from .dcgm.nvml_monitor import NVMLMonitor
from .tb_dcgm_types.da_exceptions import TorchBenchAnalyzerException
from .tb_dcgm_types.gpu_device_factory import GPUDeviceFactory
from .dcgm ... |
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by a... |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appli... |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appl... |
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by a... |
import os
import time
from .monitor import Monitor
import psutil
from ..tb_dcgm_types.cpu_peak_memory import CPUPeakMemory
class CPUMonitor(Monitor):
"""
A CPU monitor that uses psutil to monitor CPU usage
"""
def __init__(self, frequency, metrics_needed=[], monitored_pid=None):
super().__init... |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appli... |
import time
from .monitor import Monitor
from ..tb_dcgm_types.gpu_free_memory import GPUFreeMemory
from ..tb_dcgm_types.gpu_tensoractive import GPUTensorActive
from ..tb_dcgm_types.gpu_peak_memory import GPUPeakMemory
from ..tb_dcgm_types.gpu_utilization import GPUUtilization
from ..tb_dcgm_types.gpu_power_usage import... |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appli... |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appli... |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appli... |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under ... |
from functools import total_ordering
from .gpu_record import GPURecord
@total_ordering
class GPUPCIERX(GPURecord):
"""
GPU PCIe RX Bytes record. The number of bytes of active PCIe rx (read) data including both header and payload.
Note that this is from the perspective of the GPU, so copying data from host... |
import logging
LOGGER_NAME = 'TorchBenchLogger'
def set_logger(logger_level=logging.WARNING):
formatter = logging.Formatter(fmt='%(asctime)s - %(levelname)s - %(module)s - %(message)s')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger = logging.getLogger(LOGGER_NAME)
lo... |
# Sampling period for the monitors, in seconds (default is 10 ms).
DEFAULT_MONITORING_INTERVAL = 0.01
class AnalayzerConfig:
    """Configuration holder for the model analyzer.

    NOTE(review): the class name keeps the existing (misspelled) public
    identifier so that callers elsewhere in the project keep working.
    """
    def __init__(self):
        # How often (in seconds) the monitors sample their metrics.
        self.monitoring_interval = DEFAULT_MONITORING_INTERVAL
from functools import total_ordering
from .gpu_record import GPURecord
@total_ordering
class GPUPCIETX(GPURecord):
"""
GPU PCIe TX Bytes record. The number of bytes of active PCIe tx (transmit) data including both header and payload.
Note that this is from the perspective of the GPU, so copying data from ... |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applic... |
from .record import Record
class CPURecord(Record):
"""
This is a base class for any
CPU based record
"""
def __init__(self, value, timestamp=0):
"""
Parameters
----------
value : float
The value of the CPU metrtic
device_uuid : str
... |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applic... |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applic... |
from functools import total_ordering
from .cpu_record import CPURecord
@total_ordering
class CPUPeakMemory(CPURecord):
"""
The peak memory usage in the CPU.
"""
tag = "cpu_peak_memory"
def __init__(self, value, timestamp=0):
"""
Parameters
----------
value : float... |
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by a... |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applic... |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applic... |
from functools import total_ordering
from .gpu_record import GPURecord
@total_ordering
class GPUDRAMActive(GPURecord):
"""
GPU DRAM active record
"""
tag = "gpu_dramactive"
def __init__(self, value, device_uuid=None, timestamp=0):
"""
Parameters
----------
value :... |
class TorchBenchAnalyzerException(Exception):
    """Base exception raised by the TorchBench Model Analyzer components."""
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by a... |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applic... |
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by a... |
from functools import total_ordering
from .gpu_record import GPURecord
@total_ordering
class GPUFP32Active(GPURecord):
"""
GPU FP32 active record
"""
tag = "gpu_fp32active"
def __init__(self, value, device_uuid=None, timestamp=0):
"""
Parameters
----------
value :... |
"""Scribe Uploader for Pytorch Benchmark Data
Currently supports data in pytest-benchmark format but can be extended.
New fields can be added just by modifying the schema in this file, schema
checking is only here to encourage reusing existing fields and avoiding typos.
"""
import argparse
import time
import multipr... |
import argparse
import pathlib
import yaml
CORE_MODEL_PATH = pathlib.Path(__file__).parent.parent.absolute().joinpath("torchbenchmark", "models")
def get_model_list():
    """Return the names of all model directories under CORE_MODEL_PATH."""
    return [entry.name for entry in CORE_MODEL_PATH.iterdir() if entry.is_dir()]
def check_csv_file(csv_file, know... |
"""
Dump the contents of a pytest benchmark .json file.
"""
import argparse
import json
from tabulate import tabulate
def print_benchmark_stats(data):
print_stats = ['min', 'max', 'mean', 'stddev', 'rounds', 'median']
headers = ['name'] + print_stats
rows = []
for benchmark in data['benchmarks']:
... |
import pathlib
import torch
from typing import Optional, List, Tuple
from torchbenchmark import ModelTask
import os
import sys
import time
import numpy
from components.model_analyzer.TorchBenchAnalyzer import ModelAnalyzer
from run_sweep import WORKER_TIMEOUT, WARMUP_ROUNDS, ModelTestResult, NANOSECONDS_PER_MILLISECOND... |
"""Scribe Uploader for Pytorch Benchmark V2 Data
Currently supports data in pytest-benchmark format but can be extended.
New fields can be added just by modifying the schema in this file, schema
checking is only here to encourage reusing existing fields and avoiding typos.
"""
import argparse
import time
import mult... |
"""Scribe Uploader for Pytorch Benchmark Data
Currently supports userbenchmark result json file.
"""
import argparse
import time
import json
import os
import requests
from collections import defaultdict
from datetime import datetime
def get_metrics_date_from_file(fname: str) -> str:
bname = os.path.basename(fnam... |
import argparse
import sys
from pathlib import Path
REPO_ROOT = Path(__file__).parent.parent.parent.resolve()
class add_path():
def __init__(self, path):
self.path = path
def __enter__(self):
sys.path.insert(0, self.path)
def __exit__(self, exc_type, exc_value, traceback):
try:
... |
"""
This script reads from a PyTorch benchmarking directory and generates a yaml file
that drives the bisector to run tests on the specified PyTorch commits.
This only works on V1 and later benchmark, V0 is not supported.
"""
import os
import re
import git
import json
import yaml
import argparse
import dataclasses
from... |
import os
import re
import tabulate
import argparse
MAGIC_PREFIX = "STABLE_TEST_MODEL: "
THRESHOLD = 7
def _parse_pr_body(body):
magic_lines = list(filter(lambda x: MAGIC_PREFIX == x[:len(MAGIC_PREFIX)], body.splitlines()))
if len(magic_lines):
return magic_lines[-1][len(MAGIC_PREFIX):].strip()
def _... |
"""
This script runs userbenchmarks abtest upon two PyTorch versions.
"""
import argparse
import os
import subprocess
import shutil
import sys
import json
from pathlib import Path
from bmutils import REPO_ROOT, add_path
from typing import Dict, Optional
with add_path(REPO_ROOT):
import torchbenchmark.util.gitutils... |
"""
Script that runs torchbench with a benchmarking config.
The configs are located within the configs/ directory.
For example, the default config we use is `torchdynamo/eager-overhead`
"""
import re
import sys
import os
import yaml
import argparse
import subprocess
import itertools
from dataclasses import dataclass
fr... |
import argparse
import os
import json
import yaml
from pathlib import Path
WORKFLOW_LINK_TEMPLATE = "https://github.com/pytorch/benchmark/actions/runs/"
def check_env(bisection_root: str):
"Check `bisection_root` contains bisection config file, github issue file, and result json."
# gh-issue.md exists
# r... |
import sys
from pathlib import Path
CURRENT_DIR = Path(__file__).parent
REPO_ROOT = str(CURRENT_DIR.parent.parent.parent)
class add_path():
def __init__(self, path):
self.path = path
def __enter__(self):
sys.path.insert(0, self.path)
def __exit__(self, exc_type, exc_value, traceback):
... |
import json
import os
import re
from pathlib import Path
import argparse
ATTRIBUTES = ["batch_size", "precision"]
def get_nonempty_json(d):
r = []
for f in filter(lambda x: x.endswith(".json"), os.listdir(d)):
fullpath = os.path.join(d, f)
if os.stat(fullpath).st_size:
r.append(ful... |
import argparse
import sys
import subprocess
from pathlib import Path
from aicluster import run_aicluster_benchmark
REPO_ROOT = Path(__file__).parent.parent.parent.parent.resolve()
class add_path():
def __init__(self, path):
self.path = path
def __enter__(self):
sys.path.insert(0, self.path)... |
"""
The script to upload TorchBench CI result from S3 to Scribe (Internal).
To run this script, users need to set two environment variables:
- AWS_ACCESS_KEY_ID
- AWS_SECRET_ACCESS_KEY
It assumes the following hierarchy of the result directory:
torchbench-aicluster-metrics/
|-distributed
|-metrics-20220805192500.js... |
from pathlib import Path
from typing import List
CURRENT_DIR = Path(__file__).parent
def list_userbenchmarks() -> List[str]:
    """Return the names of userbenchmark packages: subdirectories of
    CURRENT_DIR that contain an __init__.py file."""
    package_names = [
        entry.name
        for entry in CURRENT_DIR.iterdir()
        if entry.is_dir() and entry.joinpath('__init__.py').exists()
    ]
    return package_names
def get_ci_from_u... |
import os
import sys
import yaml
from datetime import datetime, timedelta
import time
import json
import yaml
from pathlib import Path
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Callable
REPO_PATH = Path(os.path.abspath(__file__)).parent.parent
USERBENCHMARK_OUTPUT_PREFIX = ... |
import itertools
import time
from datetime import datetime
from typing import List
import json
import numpy as np
import argparse
import re
import torch
from ..utils import REPO_PATH, add_path, get_output_dir, get_output_json, dump_output
with add_path(REPO_PATH):
from torchbenchmark.util.experiment.instantiator ... |
import itertools
import time
from datetime import datetime
from typing import List
import yaml
import json
import numpy as np
import argparse
from ..utils import REPO_PATH, add_path, get_output_dir, get_output_json, dump_output
with add_path(REPO_PATH):
from components._impl.workers.subprocess_rpc import Unserial... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.