| python_code (string, lengths 0–1.02M) | repo_name (string, lengths 9–48) | file_path (string, lengths 5–114) |
|---|---|---|
import benchmark_caffe2 as op_bench_c2
import operator_benchmark as op_bench
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
import numpy
"""Microbenchmarks for element-wise BatchGather operator."""
# Configs for C2 BatchGather operator
batch_gather_configs_short = op_b... | pytorch-master | benchmarks/operator_benchmark/c2/batch_gather_test.py |
import operator_benchmark as op_bench
import benchmark_caffe2 as op_bench_c2
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for element-wise Add operator. Supports both Caffe2/PyTorch."""
# Configs for C2 add operator
add_long_configs = op_bench.cross... | pytorch-master | benchmarks/operator_benchmark/c2/add_test.py |
import benchmark_caffe2 as op_bench_c2
import operator_benchmark as op_bench
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for QuantileOp operator."""
# Configs for C2 QuantileOp operator
quantile_op_long_configs = op_bench.cross_product_configs(
... | pytorch-master | benchmarks/operator_benchmark/c2/quantile_op_test.py |
pytorch-master | benchmarks/operator_benchmark/c2/__init__.py | |
import benchmark_caffe2 as op_bench_c2
import operator_benchmark as op_bench
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for element-wise ReplaceNaN operator."""
# Configs for C2 ReplaceNaN operator
replace_nan_long_configs = op_bench.cross_product... | pytorch-master | benchmarks/operator_benchmark/c2/replace_nan_test.py |
import benchmark_caffe2 as op_bench_c2
import operator_benchmark as op_bench
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for BatchBoxCox operator."""
# Configs for C2 BatchBoxCox operator
batch_box_cox_long_configs = op_bench.cross_product_configs(... | pytorch-master | benchmarks/operator_benchmark/c2/batch_box_cox_test.py |
import benchmark_caffe2 as op_bench_c2
import operator_benchmark as op_bench
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core, dyndep
dyndep.InitOpsLibrary("@/caffe2/caffe2/fb/operators:clip_ranges_op")
"""Microbenchmarks for ClipRanges operator."""
# Configs for C2 ClipR... | pytorch-master | benchmarks/operator_benchmark/c2/clip_ranges_test.py |
import operator_benchmark as op_bench
import benchmark_caffe2 as op_bench_c2
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for MatMul operator"""
# Configs for C2 Matmul operator
mm_long_configs = op_bench.cross_product_configs(
M=[8, 64, 128],
... | pytorch-master | benchmarks/operator_benchmark/c2/matmul_test.py |
import timeit
import torch
import torch.nn.functional as F
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._debug_set_fusion_group_inlining(False)
torch.set_num_threads(1)
def hardswish(x):
return x * torch.clamp(x + 3.0, 0.0, 6.0) / 6.0
unary_ops = [
hardswish,
torch._C._nn.hardswish,
torch.... | pytorch-master | benchmarks/cpp/tensorexpr/bench_ops.py |
import argparse
import sys
import torch
import torch.utils.benchmark as benchmark_utils
try:
from benchmarks.fastrnns.factory import lstm_creator
except ImportError:
from caffe2.benchmarks.fastrnns.factory import lstm_creator
from torchvision.models import resnet50
def prepare_lstm_jit(bench_args):
mod... | pytorch-master | benchmarks/record_function_benchmark/record_function_bench.py |
import pandas
df = pandas.read_csv("perf.csv")
ops = pandas.unique(df["operator"])
nops = len(ops)
pivot_op_shape = df.pivot_table(
values="time", index=["operator", "shape"], columns=["fuser"]
)
pivot_speedups = (pivot_op_shape.T / pivot_op_shape["eager"]).T
import matplotlib.pyplot as plt
plt.rcParams["figure... | pytorch-master | benchmarks/fuser/plot_speedups.py |
import click
import sys
import time
import torch
import inspect
import itertools
torch.set_num_threads(1)
torch._C._debug_set_fusion_group_inlining(False)
def rand(*shape):
return torch.rand(*shape).mul(16).add(1)
# ------------------------------------------------------------------------------
# Shape test cas... | pytorch-master | benchmarks/fuser/run_benchmarks.py |
import argparse
import sys
import torch
from .utils import gen_sparse_csr, gen_sparse_coo, gen_sparse_coo_and_csr, Event
def test_sparse_csr(m, nnz, test_count):
start_timer = Event(enable_timing=True)
stop_timer = Event(enable_timing=True)
csr = gen_sparse_csr((m, m), nnz)
vector = torch.randn(m, dty... | pytorch-master | benchmarks/sparse/spmv.py |
if __name__ == "__main__":
pass
| pytorch-master | benchmarks/sparse/__init__.py |
import torch
import functools
import random
import operator
import numpy as np
import time
# shim for torch.cuda.Event when running on cpu
class Event(object):
def __init__(self, enable_timing):
pass
def record(self):
self.time = time.perf_counter()
def elapsed_time(self, end_event):
... | pytorch-master | benchmarks/sparse/utils.py |
import argparse
import sys
import torch
from utils import gen_sparse_csr, gen_sparse_coo, Event
def test_sparse_csr(m, n, k, nnz, test_count):
start_timer = Event(enable_timing=True)
stop_timer = Event(enable_timing=True)
csr = gen_sparse_csr((m, k), nnz)
mat = torch.randn(k, n, dtype=torch.double)
... | pytorch-master | benchmarks/sparse/spmm.py |
if __name__ == "__main__":
pass
| pytorch-master | benchmarks/sparse/dlmc/__init__.py |
# Sparse benchmarks
# This benchmark tests sparse matmul performance.
# It compares the performance of the sparse matrix routines
# `sparse @ vector`, `sparse @ sparse` and `sparse @ dense` across backends (CPU/CUDA)
# and against other frameworks such as scipy.
import sys
import argparse
import ... | pytorch-master | benchmarks/sparse/dlmc/matmul_bench.py |
import torch
from pathlib import Path
from scipy import sparse
import math
def to_coo_scipy(x):
indices_1 = x._indices().numpy()
values_1 = x._values().numpy()
return sparse.coo_matrix((values_1, (indices_1[0], indices_1[1])),
shape=x.shape)
def sparse_grad_output(a, b):
... | pytorch-master | benchmarks/sparse/dlmc/utils.py |
import argparse
import sys
import timeit
import torch
from torch.utils.benchmark import Timer
PARALLEL_TASKS_NUM = 4
INTERNAL_ITER = None
def loop_workload(x):
for i in range(INTERNAL_ITER):
x = torch.mm(x, x)
return x
def parallel_workload(x):
def parallel_task(x):
for i in range(int(INT... | pytorch-master | benchmarks/profiler_benchmark/profiler_bench.py |
import torch
import torchvision.models as models
import torch.autograd.profiler as profiler
for with_cuda in [False, True]:
model = models.resnet18()
inputs = torch.randn(5, 3, 224, 224)
sort_key = "self_cpu_memory_usage"
if with_cuda and torch.cuda.is_available():
model = model.cuda()
... | pytorch-master | benchmarks/profiler_benchmark/resnet_memory_profiler.py |
"""
This is a script for the PyTorch Android custom selective build test. It prepares
a MobileNetV2 TorchScript model and dumps the root ops used by the model so the
custom build script can create a tailored build containing only those ops.
"""
import torch
import torchvision
import yaml
# Download and trace the model.
m... | pytorch-master | android/test_app/make_assets_custom.py |
import torch
import torchvision
print(torch.version.__version__)
resnet18 = torchvision.models.resnet18(pretrained=True)
resnet18.eval()
resnet18_traced = torch.jit.trace(resnet18, torch.rand(1, 3, 224, 224)).save("app/src/main/assets/resnet18.pt")
resnet50 = torchvision.models.resnet50(pretrained=True)
resnet50.eva... | pytorch-master | android/test_app/make_assets.py |
import torch
from torch import Tensor
from typing import Dict, List, Tuple, Optional
OUTPUT_DIR = "src/androidTest/assets/"
def scriptAndSave(module, fileName):
print('-' * 80)
script_module = torch.jit.script(module)
print(script_module.graph)
outputFileName = OUTPUT_DIR + fileName
# note that th... | pytorch-master | android/pytorch_android/generate_test_torchscripts.py |
## @package diagnose_protobuf
# Module scripts.diagnose_protobuf
"""Diagnoses the current protobuf situation.
Protocol buffer needs to be properly installed for Caffe2 to work, and
sometimes it is rather tricky. Specifically, we will need to have a
consistent version between C++ and python simultaneously. This is a
co... | pytorch-master | scripts/diagnose_protobuf.py |
## @package get_python_cmake_flags
# Module scripts.get_python_cmake_flags
##############################################################################
# Use this script to find your preferred python installation.
##############################################################################
#
# You can use the follo... | pytorch-master | scripts/get_python_cmake_flags.py |
import unittest
import tempfile
from commitlist import CommitList
class TestCommitList(unittest.TestCase):
def test_create_new(self):
with tempfile.TemporaryDirectory() as tempdir:
commit_list_path = f'{tempdir}/commitlist.csv'
commit_list = CommitList.create_new(commit_list_path, '... | pytorch-master | scripts/release_notes/test_release_notes.py |
from collections import namedtuple
from pathlib import Path
import locale
import subprocess
import re
import requests
import os
import json
categories = [
'Uncategorized',
'distributed',
'lazy',
'hub',
'mobile',
'jit',
'visualization',
'onnx',
'caffe2',
'quantization',
'amd'... | pytorch-master | scripts/release_notes/common.py |
# Quick script to apply categorized items to the
# base commitlist. Useful if you are refactoring any code
# but want to keep the previous data on categories
import commitlist
import csv
category_csv = "results/category_data.csv"
commitlist_csv = "results/commitlist.csv"
with open(category_csv, "r") as category_data... | pytorch-master | scripts/release_notes/apply_categories.py |
import argparse
import os
import textwrap
from common import categories, topics, get_commit_data_cache
from commitlist import CommitList
class Categorizer:
def __init__(self, path, category='Uncategorized'):
self.cache = get_commit_data_cache()
self.commits = CommitList.from_existing(path)
... | pytorch-master | scripts/release_notes/categorize.py |
import argparse
import torch
from os import path
import json
# Import all utils so that getattr below can find them
from torch.utils import bottleneck, checkpoint, model_zoo
all_submod_list = [
"",
"nn",
"nn.functional",
"nn.init",
"optim",
"autograd",
"cuda",
"sparse",
"distributi... | pytorch-master | scripts/release_notes/namespace_check.py |
import argparse
from common import run, topics, get_features
from collections import defaultdict
import os
from pathlib import Path
import csv
import pprint
from common import get_commit_data_cache, features_to_dict
import re
import dataclasses
from typing import List
"""
Example Usages
Create a new commitlist for c... | pytorch-master | scripts/release_notes/commitlist.py |
#! /usr/bin/env python3
import onnx.backend
import argparse
import caffe2.python.workspace as c2_workspace
import glob
import json
import numpy as np
import onnx
import caffe2.python.onnx.frontend
import caffe2.python.onnx.backend
import os
import shutil
import tarfile
import tempfile
import boto3
from six.moves.ur... | pytorch-master | scripts/model_zoo/update-models-from-caffe2.py |
#! /usr/bin/env python3
import os
import subprocess
import sys
import tarfile
import tempfile
from six.moves.urllib.request import urlretrieve
from caffe2.python.models.download import downloadFromURLToFile, getURLFromName, deleteDirectory
class SomeClass:
# largely copied from
# https://github.com/onnx/onn... | pytorch-master | scripts/model_zoo/update-caffe2-models.py |
import argparse
import functools
import traceback
from torch.utils.jit.log_extract import extract_ir, load_graph_and_inputs, run_baseline_no_fusion, run_nnc, run_nvfuser
from typing import List, Tuple, Callable, Optional
'''
Usage:
1. Run your script and pipe into a log file
PYTORCH_JIT_LOG_LEVEL=">>graph_fuser" pyt... | pytorch-master | scripts/jit/log_extract.py |
#!/usr/bin/env python3
"""
This script finds the user/pr creator responsible for labeling a PR by a commit SHA. It is used by the workflow in
'.github/workflows/pr-labels.yml'. If there exists no PR associated with the commit or the PR is properly labeled,
this script is a no-op.
Note: we ping the user only, not the r... | pytorch-master | .github/scripts/process_commit.py |
from unittest import TestCase, mock, main
from test_trymerge import mocked_gh_graphql
from trymerge import GitHubPR
from gitutils import get_git_remote_name, get_git_repo_dir, GitRepo
from typing import Any
from tryrebase import rebase_onto
class TestRebase(TestCase):
@mock.patch('trymerge.gh_graphql', side_effe... | pytorch-master | .github/scripts/test_tryrebase.py |
#!/usr/bin/env python3
import base64
import json
import os
import re
import time
import urllib.parse
from dataclasses import dataclass
from datetime import datetime
from functools import lru_cache
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Pattern,
Tuple,
Un... | pytorch-master | .github/scripts/trymerge.py |
import os
import re
from typing import List, Pattern, Tuple, Optional
BOT_COMMANDS_WIKI = "https://github.com/pytorch/pytorch/wiki/Bot-commands"
CIFLOW_LABEL = re.compile(r"^ciflow/.+")
CIFLOW_TRUNK_LABEL = re.compile(r"^ciflow/trunk")
OFFICE_HOURS_LINK = "https://github.com/pytorch/pytorch/wiki/Dev-Infra-Office-Ho... | pytorch-master | .github/scripts/trymerge_explainer.py |
#!/usr/bin/env python3
from dataclasses import asdict, dataclass, field
from pathlib import Path
from typing import Dict, Set, List, Iterable
import jinja2
import os
import sys
from typing_extensions import Literal, TypedDict
import generate_binary_build_matrix # type: ignore[import]
Arch = Literal["windows", "li... | pytorch-master | .github/scripts/generate_ci_workflows.py |
import json
import subprocess
import sys
from enum import Enum
from pathlib import Path
from typing import NamedTuple, Optional
# From: https://docs.github.com/en/rest/reference/checks
class GitHubAnnotationLevel(str, Enum):
NOTICE = "notice"
WARNING = "warning"
FAILURE = "failure"
class GitHubAnnotatio... | pytorch-master | .github/scripts/convert_lintrunner_annotations_to_github.py |
#!/usr/bin/env python3
import argparse
import os
import subprocess
import re
from datetime import datetime
from distutils.util import strtobool
from pathlib import Path
LEADING_V_PATTERN = re.compile("^v")
TRAILING_RC_PATTERN = re.compile("-rc[0-9]*$")
LEGACY_BASE_VERSION_SUFFIX_PATTERN = re.compile("a0$")
class No... | pytorch-master | .github/scripts/generate_pytorch_version.py |
#!/usr/bin/env python3
# Tests implemented in this file are relying on GitHub GraphQL APIs
# In order to avoid test flakiness, results of the queries
# are cached in gql_mocks.json
# PyTorch Lint workflow does not have GITHUB_TOKEN defined to avoid
# flakiness, so if you are making changes to merge_rules or
# GraphQL q... | pytorch-master | .github/scripts/test_trymerge.py |
#!/usr/bin/env python3
import argparse
import sys
import yaml
from pathlib import Path
REPO_ROOT = Path(__file__).resolve().parent.parent.parent
WORKFLOWS = REPO_ROOT / ".github" / "workflows"
EXPECTED_GROUP = "${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}" \
"-${{ github.event_na... | pytorch-master | .github/scripts/ensure_actions_will_cancel.py |
from unittest import TestCase, main, mock
from typing import Any, List, Dict
from fetch_latest_green_commit import isGreen, WorkflowCheck
workflowNames = [
"pull",
"trunk",
"Lint",
"linux-binary-libtorch-pre-cxx11",
"android-tests",
"windows-binary-wheel",
"periodic",
"docker-release-bu... | pytorch-master | .github/scripts/test_fetch_latest_green_commit.py |
"""
Generate a torchbench test report from a file containing the PR body.
Currently, only supports running tests on specified model names
Testing environment:
- Intel Xeon 8259CL @ 2.50 GHz, 24 Cores with disabled Turbo and HT
- Nvidia Tesla T4
- Nvidia Driver 470.82.01
- Python 3.8
- CUDA 11.3
"""
# Known issues:
# 1... | pytorch-master | .github/scripts/run_torchbench.py |
#!/usr/bin/env python3
import os
import re
import tempfile
from collections import defaultdict
from datetime import datetime
from typing import cast, Any, Dict, Iterator, List, Optional, Tuple, Union
RE_GITHUB_URL_MATCH = re.compile("^https://.*@?github.com/(.+)/(.+)$")
def get_git_remote_name() -> str:
return... | pytorch-master | .github/scripts/gitutils.py |
#!/usr/bin/env python3
"""Generates a matrix to be utilized through github actions
Will output a condensed version of the matrix if on a pull request that only
includes the latest version of python we support built on three different
architectures:
* CPU
* Latest CUDA
* Latest ROCM
"""
from typing import... | pytorch-master | .github/scripts/generate_binary_build_matrix.py |
#!/usr/bin/env python3
import os
import subprocess
import sys
import re
from typing import Any
from gitutils import get_git_remote_name, get_git_repo_dir, GitRepo
from trymerge import gh_post_pr_comment as gh_post_comment, GitHubPR
def parse_args() -> Any:
from argparse import ArgumentParser
parser = Argumen... | pytorch-master | .github/scripts/tryrebase.py |
#!/usr/bin/env python3
'''
Test ownership was introduced in https://github.com/pytorch/pytorch/issues/66232.
As a part of enforcing test ownership, we want to maintain a list of existing PyTorch labels
to verify the owners' existence. This script outputs a file containing a list of existing
pytorch/pytorch labels so t... | pytorch-master | .github/scripts/export_pytorch_labels.py |
#!/usr/bin/env python3
import os
import re
def main() -> None:
ref = os.environ['GITHUB_REF']
m = re.match(r'^refs/(\w+)/(.*)$', ref)
if m:
category, stripped = m.groups()
if category == 'heads':
print(f'::set-output name=branch::{stripped}')
elif category == 'pull':
... | pytorch-master | .github/scripts/parse_ref.py |
import json
import os
import subprocess
import requests
from typing import Any, Dict
from argparse import ArgumentParser
MERGEBOT_TOKEN = os.environ["MERGEBOT_TOKEN"]
PYTORCHBOT_TOKEN = os.environ["PYTORCHBOT_TOKEN"]
OWNER, REPO = "pytorch", "pytorch"
def git_api(
url: str, params: Dict[str, str], type: str = "g... | pytorch-master | .github/scripts/update_commit_hashes.py |
#!/usr/bin/env python3
from gitutils import PeekableIterator, patterns_to_regex
from unittest import TestCase, main
class TestPeekableIterator(TestCase):
def test_iterator(self, input_: str = "abcdef") -> None:
iter_ = PeekableIterator(input_)
for idx, c in enumerate(iter_):
self.assert... | pytorch-master | .github/scripts/test_gitutils.py |
import sys
from typing import Any, Dict, List, NamedTuple, Tuple
from gitutils import _check_output
import rockset # type: ignore[import]
import os
import re
def eprint(msg: str) -> None:
print(msg, file=sys.stderr)
class WorkflowCheck(NamedTuple):
workflowName: str
name: str
jobName: str
conclu... | pytorch-master | .github/scripts/fetch_latest_green_commit.py |
# Helper to get the id of the currently running job in a GitHub Actions
# workflow. GitHub does not provide this information to workflow runs, so we
# need to figure it out based on what they *do* provide.
import requests
import os
import argparse
# Our strategy is to retrieve the parent workflow run, then filter its... | pytorch-master | .github/scripts/get_workflow_job_id.py |
#!/usr/bin/env python3
'''
Verify that it is possible to round-trip native_functions.yaml via ruamel under some
configuration. Keeping native_functions.yaml consistent in this way allows us to
run codemods on the file using ruamel without introducing line noise. Note that we don't
want to normalize the YAML file, as ... | pytorch-master | .github/scripts/lint_native_functions.py |
pytorch-master | aten/src/ATen/function_wrapper.py | |
#!/usr/bin/env python3
import argparse
import glob
import sys
import os
from torchgen.code_template import CodeTemplate
H_NAME = "glsl.h"
CPP_NAME = "glsl.cpp"
DEFAULT_ENV = {"precision": "highp", "format": "rgba32f"}
def findAllGlsls(path):
vexs = glob.glob(os.path.join(path, '**', '*.glsl'), recursive=True)
... | pytorch-master | aten/src/ATen/gen_vulkan_glsl.py |
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import confu
from confu import arm, x86
parser = confu.standard_parser()
def main(ar... | pytorch-master | aten/src/ATen/native/quantized/cpu/qnnpack/configure.py |
import os
QNNPACK_SOURCES = {
# Generic functions
None: [
"requantization/fp32-psimd.c",
"requantization/fp32-scalar.c",
"requantization/gemmlowp-scalar.c",
"requantization/precise-psimd.c",
"requantization/precise-scalar.c",
"requantization/q31-scalar.c",
... | pytorch-master | aten/src/ATen/native/quantized/cpu/qnnpack/generate-wrapper.py |
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import confu
parser = confu.standard_parser("clog configuration script")
def main(args... | pytorch-master | aten/src/ATen/native/quantized/cpu/qnnpack/deps/clog/configure.py |
#!/usr/bin/env python3
"""
Code generator for NNAPI wrapper. We can't link directly against
libneuralnetworks.so because we want PyTorch to work on Android
devices that don't have it available. Instead, we generate a wrapper
that opens libneuralnetworks.so with dlopen and finds the functions
we need with dlsym. We a... | pytorch-master | aten/src/ATen/nnapi/codegen.py |
from collections import defaultdict
from typing import Dict, List, Optional, Sequence, Tuple, Union
import torchgen.api.dispatcher as dispatcher
from torchgen.api.translate import translate
from torchgen.api.types import Binding, DispatcherSignature, Expr
from torchgen.context import with_native_function
from torchge... | pytorch-master | torchgen/native_function_generation.py |
import textwrap
from dataclasses import dataclass
from typing import List, Optional, Sequence, Tuple
from torchgen.api.translate import translate
from torchgen.api.types import DispatcherSignature
from torchgen.context import method_with_native_function
from torchgen.model import (
Argument,
BaseTy,
BaseTy... | pytorch-master | torchgen/gen_vmap_plumbing.py |
import argparse
import functools
import json
import os
import pathlib
from collections import defaultdict, namedtuple, OrderedDict
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Sequence, Set, Tuple, TypeVar, Union
import yaml
from typing_extensions import Literal
import torchgen.api.... | pytorch-master | torchgen/gen.py |
import threading
from contextlib import contextmanager
from typing import Iterator, Optional
# Simple dynamic scoping implementation. The name "parametrize" comes
# from Racket.
#
# WARNING WARNING: LOOKING TO EDIT THIS FILE? Think carefully about
# why you need to add a toggle to the global behavior of code
# gener... | pytorch-master | torchgen/local.py |
from typing import Callable, List, Optional, Tuple, Union
from torchgen.api import cpp, dispatcher
from torchgen.api.translate import translate
from torchgen.api.types import (
BaseCType,
Binding,
CType,
DispatcherSignature,
FunctionalizationLambda,
NativeSignature,
tensorListT,
tensorT... | pytorch-master | torchgen/gen_functionalization_type.py |
import re
from typing import Mapping, Match, Optional, Sequence
# match $identifier or ${identifier} and replace with value in env
# If this identifier is at the beginning of whitespace on a line
# and its value is a list then it is treated as
# block substitution by indenting to that depth and putting each element
# ... | pytorch-master | torchgen/code_template.py |
"""torchgen
This module contains codegeneration utilities for PyTorch. It is used to
build PyTorch from source, but may also be used for out-of-tree projects
that extend PyTorch.
Note well that we provide no BC guarantees for torchgen. If you're interested
in using torchgen and want the PyTorch team to be aware, plea... | pytorch-master | torchgen/__init__.py |
import dataclasses
import itertools
import re
from dataclasses import dataclass
from enum import auto, Enum
from typing import Callable, Dict, Iterator, List, Optional, Sequence, Set, Tuple, Union
from torchgen.utils import assert_never, NamespaceHelper, OrderedSet
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~... | pytorch-master | torchgen/model.py |
import contextlib
import functools
from typing import Callable, Dict, Iterator, Optional, TypeVar, Union
import torchgen.local as local
from torchgen.model import (
BackendIndex,
DispatchKey,
NativeFunction,
NativeFunctionsGroup,
NativeFunctionsViewGroup,
)
from torchgen.utils import context, S, T... | pytorch-master | torchgen/context.py |
import contextlib
import functools
import hashlib
import os
import re
import sys
import textwrap
from argparse import Namespace
from dataclasses import fields, is_dataclass
from enum import Enum
from typing import (
Any,
Callable,
Dict,
Generic,
Iterable,
Iterator,
List,
NoReturn,
Op... | pytorch-master | torchgen/utils.py |
import argparse
import os
import pathlib
import re
from collections import Counter, defaultdict, namedtuple
from typing import Dict, List, Optional, Sequence, Union
import yaml
import torchgen.api.dispatcher as dispatcher
import torchgen.dest as dest
from torchgen.api.types import DispatcherSignature
from torchgen.co... | pytorch-master | torchgen/gen_backend_stubs.py |
import argparse
import os
import pathlib
import re
from collections import Counter, namedtuple
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Optional,
Sequence,
Tuple,
Type,
Union,
)
import yaml
import torchgen.dest as dest
from torchgen.api.lazy impo... | pytorch-master | torchgen/gen_lazy_tensor.py |
from dataclasses import dataclass
from typing import Dict, List, Optional, Set, Tuple
import yaml
from torchgen.model import NativeFunction
from torchgen.selective_build.operator import (
merge_debug_info,
merge_operator_dicts,
SelectiveBuildOperator,
strip_operator_overload_name,
)
# A SelectiveBuil... | pytorch-master | torchgen/selective_build/selector.py |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple
# This class holds information about a single operator used to determine
# the outcome of a selective/custom PyTorch build that doesn't include
# registration code for all the supported operators. This is done to
# reduce the size of the genera... | pytorch-master | torchgen/selective_build/operator.py |
pytorch-master | torchgen/selective_build/__init__.py | |
from typing import Dict, Union
from torchgen.model import NativeFunctionsGroup, NativeFunctionsViewGroup
def func_name_base_str(g: Union[NativeFunctionsGroup, NativeFunctionsViewGroup]) -> str:
if isinstance(g, NativeFunctionsGroup):
return str(g.functional.func.name.name.base)
else:
return s... | pytorch-master | torchgen/static_runtime/config.py |
pytorch-master | torchgen/static_runtime/__init__.py | |
import json
import logging
import math
from typing import Dict, List, Optional, Sequence, Tuple, Union
import torchgen.api.cpp as cpp
from torchgen.context import native_function_manager
from torchgen.model import (
Argument,
BackendIndex,
BaseTy,
BaseType,
FunctionSchema,
NativeFunctionsGroup... | pytorch-master | torchgen/static_runtime/generator.py |
import argparse
import itertools
import os
from typing import Sequence, TypeVar, Union
from libfb.py.log import set_simple_logging # type: ignore[import]
from torchgen import gen
from torchgen.context import native_function_manager
from torchgen.model import DispatchKey, NativeFunctionsGroup, NativeFunctionsViewGrou... | pytorch-master | torchgen/static_runtime/gen_static_runtime_ops.py |
pytorch-master | torchgen/operator_versions/__init__.py | |
MOBILE_UPGRADERS_HEADER_DESCRIPTION = """/**
* @generated
* This is an auto-generated file. Please do not modify it by hand.
* To re-generate, please run:
* cd ~/pytorch && python torchgen/operator_versions/gen_mobile_upgraders.py
*/
"""
| pytorch-master | torchgen/operator_versions/gen_mobile_upgraders_constant.py |
#!/usr/bin/env python3
import os
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List
import torch
from torch.jit.generate_bytecode import generate_upgraders_bytecode
from torchgen.code_template import CodeTemplate
from torchgen.operator_versions.gen_mobile_upgraders_constant import (
... | pytorch-master | torchgen/operator_versions/gen_mobile_upgraders.py |
#!/usr/bin/env python3
import os
from itertools import chain
from pathlib import Path
from torch.jit._shape_functions import (
bounded_compute_graph_mapping,
shape_compute_graph_mapping,
)
SHAPE_HEADER = r"""
/**
* @generated
* This is an auto-generated file. Please do not modify it by hand.
* To re-genera... | pytorch-master | torchgen/shape_functions/gen_jit_shape_functions.py |
#!/usr/bin/env python3
import os
from pathlib import Path
from torch.jit._decompositions import decomposition_table
# from torchgen.code_template import CodeTemplate
DECOMP_HEADER = r"""
/**
* @generated
* This is an auto-generated file. Please do not modify it by hand.
* To re-generate, please run:
* cd ~/pytor... | pytorch-master | torchgen/decompositions/gen_jit_decompositions.py |
from torchgen.api.lazy import LazyIrSchema
from torchgen.api.types import OptionalCType
def ts_lowering_body(schema: LazyIrSchema) -> str:
# for now, we just want one IR class decl and soon after also the method defs
# and we use the functional version not out/inplace.
emplace_arguments = []
for arg i... | pytorch-master | torchgen/dest/lazy_ts_lowering.py |
from dataclasses import dataclass
from typing import Dict, List, Optional, Sequence, Tuple, Union
import torchgen.api.ufunc as ufunc
from torchgen.api.translate import translate
from torchgen.api.types import (
BaseCType,
Binding,
CType,
Expr,
NamedCType,
opmath_t,
scalar_t,
StructuredI... | pytorch-master | torchgen/dest/ufunc.py |
from .lazy_ir import (
generate_non_native_lazy_ir_nodes as generate_non_native_lazy_ir_nodes,
GenLazyIR as GenLazyIR,
GenLazyNativeFuncDefinition as GenLazyNativeFuncDefinition,
GenLazyShapeInferenceDefinition as GenLazyShapeInferenceDefinition,
)
from .native_functions import (
compute_native_func... | pytorch-master | torchgen/dest/__init__.py |
from typing import List, Optional, Union
import torchgen.api.meta as meta
import torchgen.api.structured as structured
from torchgen.api.types import kernel_signature
from torchgen.context import with_native_function_and_index
from torchgen.model import BackendIndex, NativeFunction, NativeFunctionsGroup
from torchgen... | pytorch-master | torchgen/dest/native_functions.py |
import itertools
from abc import ABC
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
import torchgen.api.dispatcher as dispatcher
from torchgen.api.lazy import (
getValueT,
isValueType,
LazyArgument,
LazyIrProperties,
LazyIrSchema,
tensorListValueT,
... | pytorch-master | torchgen/dest/lazy_ir.py |
import itertools
import textwrap
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
from typing_extensions import Literal
import torchgen.api.cpp as cpp
import torchgen.api.meta as meta
import torchgen.api.structured as structured
from torchgen.api.translate import translate
from torchg... | pytorch-master | torchgen/dest/register_dispatch_key.py |
import itertools
from typing import List, Sequence, Union
from torchgen.api import cpp
from torchgen.api.types import ArgName, Binding, CType, NamedCType
from torchgen.model import (
Argument,
FunctionSchema,
Return,
SelfArgument,
TensorOptionsArguments,
Type,
)
from torchgen.utils import asse... | pytorch-master | torchgen/api/dispatcher.py |
from typing import Dict, List, NoReturn, Sequence, Union
from torchgen.api.types import (
BaseCType,
Binding,
boolT,
ConstRefCType,
deviceT,
Expr,
intArrayRefT,
iOptTensorListRefT,
iTensorListRefT,
layoutT,
ListCType,
longT,
memoryFormatT,
MutRefCType,
NamedC... | pytorch-master | torchgen/api/translate.py |
from dataclasses import dataclass
from typing import List, Optional
import torchgen.api.types as api_types
from torchgen.api import cpp, structured
from torchgen.api.types import (
ArgName,
BaseCppType,
BaseCType,
Binding,
ConstRefCType,
CType,
NamedCType,
scalarT,
)
from torchgen.mode... | pytorch-master | torchgen/api/ufunc.py |
pytorch-master | torchgen/api/__init__.py | |
from typing import List, Optional, Sequence, Union
from torchgen import local
from torchgen.api import cpp
from torchgen.api.types import (
ArgName,
BaseCType,
Binding,
boolT,
ConstRefCType,
CType,
deviceT,
layoutT,
ListCType,
MutRefCType,
NamedCType,
OptionalCType,
... | pytorch-master | torchgen/api/native.py |
from dataclasses import dataclass
from enum import Enum
from typing import Dict, List, Optional, Sequence, Set, TypeVar, Union
from torchgen.model import (
Argument,
BackendIndex,
BaseTy,
FunctionSchema,
NativeFunction,
NativeFunctionsGroup,
NativeFunctionsViewGroup,
ScalarType,
Sel... | pytorch-master | torchgen/api/types.py |
from typing import List, Optional, Sequence, Set, Union
from torchgen import local
from torchgen.api.types import (
ArgName,
ArrayCType,
ArrayRefCType,
BaseCType,
BaseTypeToCppMapping,
Binding,
boolT,
ConstRefCType,
CType,
dimnameListT,
intArrayRefT,
ListCType,
longT... | pytorch-master | torchgen/api/cpp.py |
from typing import List, Tuple
from torchgen.api import cpp
from torchgen.api.types import Binding, CppSignatureGroup, CType
from torchgen.model import (
Argument,
BaseTy,
BaseType,
ListType,
NativeFunction,
OptionalType,
Type,
)
# This file generates the code for unboxing wrappers, i.e., ... | pytorch-master | torchgen/api/unboxing.py |
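
All rows above share the three-column schema from the table header (`python_code`, `repo_name`, `file_path`). As a minimal sketch of how such an export could be consumed, assuming the rows are available as a JSON-lines file (the file name below is a placeholder, not part of the dataset), one might load them and group the files by top-level directory:

```python
import json
from collections import Counter

# Hypothetical export of the rows above: one JSON object per line with the
# three columns shown in the table header (python_code, repo_name, file_path).
DATA_FILE = "pytorch_python_code.jsonl"  # placeholder name, not from the source


def iter_rows(path):
    """Yield one record per line; each has python_code, repo_name, and file_path."""
    with open(path, encoding="utf-8") as f:
        for line in f:
            if line.strip():
                yield json.loads(line)


def top_level_dirs(rows):
    """Count rows by the first path component (e.g. benchmarks, torchgen, .github)."""
    counts = Counter(row["file_path"].split("/")[0] for row in rows)
    return counts.most_common()


if __name__ == "__main__":
    rows = list(iter_rows(DATA_FILE))
    print(f"{len(rows)} files from {set(r['repo_name'] for r in rows)}")
    for directory, n in top_level_dirs(rows):
        print(f"{directory}: {n} files")
```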