python_code | repo_name | file_path |
|---|---|---|
from typing import List, Optional
from torchgen.api import dispatcher
from torchgen.api.types import (
BaseCType,
Binding,
boolT,
ConstRefCType,
CType,
longT,
NamedCType,
tensorT,
)
from torchgen.model import (
Argument,
BaseTy,
BaseType,
FunctionSchema,
NativeFuncti... | pytorch-master | torchgen/api/functionalization.py |
import re
from dataclasses import dataclass
from typing import Dict, List, Match, Optional, Sequence, Set, Tuple
from torchgen.api import cpp
from torchgen.api.types import Binding, NamedCType
from torchgen.model import (
FunctionSchema,
NativeFunction,
NativeFunctionsViewGroup,
SchemaKind,
Type,
)... | pytorch-master | torchgen/api/autograd.py |
from typing import List, Union
from torchgen.api import cpp
from torchgen.api.types import (
ArgName,
ArrayRefCType,
BaseCType,
Binding,
ConstRefCType,
dimnameListT,
intArrayRefT,
iOptTensorListRefT,
iTensorListRefT,
NamedCType,
OptionalCType,
optionalIntArrayRefT,
... | pytorch-master | torchgen/api/structured.py |
from dataclasses import dataclass
from typing import Dict, List, Optional, Sequence, Set, Tuple, Union
from torchgen.api import cpp
from torchgen.api.types import Binding, CppSignature, CppSignatureGroup
from torchgen.gen import pythonify_default
from torchgen.model import (
Argument,
BaseTy,
BaseType,
... | pytorch-master | torchgen/api/python.py |
from typing import Any, Dict, List, Optional, Tuple, Union
from torchgen.api.types import (
BaseCppType,
BaseCType,
boolT,
CType,
deviceT,
doubleT,
layoutT,
ListCType,
longT,
memoryFormatT,
NamedCType,
OptionalCType,
scalarT,
scalarTypeT,
stringT,
SymIntT... | pytorch-master | torchgen/api/lazy.py |
from torchgen.model import NativeFunctionsGroup
# Follows dispatcher calling convention, but:
# - Mutable arguments not allowed. Meta functions are always
# written in functional form. Look at FunctionSchema.signature()
# - No tensor returns; instead we return a TensorMeta describing
# the tensor in ques... | pytorch-master | torchgen/api/meta.py |
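The calling-convention comment above is terse; the sketch below illustrates the idea in plain Python. `TensorMeta` here is a stand-in dataclass and `meta_add` a hypothetical example of the convention, not the real torchgen types.

```python
from dataclasses import dataclass
from typing import Tuple

import torch

@dataclass
class TensorMeta:
    # Stand-in for the real TensorMeta: just shape/dtype metadata.
    shape: Tuple[int, ...]
    dtype: torch.dtype

def meta_add(self: TensorMeta, other: TensorMeta) -> TensorMeta:
    # Functional form: no mutation and no tensor allocation; only the
    # metadata of the would-be output is computed.
    shape = torch.broadcast_shapes(self.shape, other.shape)
    dtype = torch.promote_types(self.dtype, other.dtype)
    return TensorMeta(shape=tuple(shape), dtype=dtype)
```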
import re
import sys
from pathlib import Path
from mypy.plugin import Plugin
def get_correct_mypy_version():
# there's probably a more elegant way to do this
match, = re.finditer(
r'mypy==(\d+(?:\.\d+)*)',
Path('.circleci/docker/requirements-ci.txt').read_text(),
)
version, = match.gr... | pytorch-master | mypy_plugins/check_mypy_version.py |
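For reference, the regex in this row extracts the pinned version like so; the sample text below is hypothetical (the real script reads `.circleci/docker/requirements-ci.txt`):

```python
import re

sample = "numpy==1.21.2\nmypy==0.960\n"  # hypothetical requirements text
match, = re.finditer(r'mypy==(\d+(?:\.\d+)*)', sample)
version, = match.groups()
print(version)  # 0.960
```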
import unittest
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep
from hypothesis import given, settings
dyndep.InitOpsLibrary("@/caffe2/modules/detectron:detectron_ops")
class TestUpsampleNearestOp(hu.HypothesisTestCase):
... | pytorch-master | modules/detectron/upsample_nearest_op_test.py |
#!/usr/bin/env python3
import os
import subprocess
import sys
import tempfile
import generate_config_yml
CHECKED_IN_FILE = "config.yml"
REGENERATION_SCRIPT = "regenerate.sh"
PARENT_DIR = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
README_PATH = os.path.join(PARENT_DIR, "README.md")
ERROR_MESSAGE_... | pytorch-master | .circleci/ensure-consistency.py |
#!/usr/bin/env python3
"""
This script is the source of truth for config.yml.
Please see README.md in this directory for details.
"""
import os
import shutil
import sys
from collections import namedtuple
import cimodel.data.simple.docker_definitions
import cimodel.data.simple.mobile_definitions
import cimodel.data.s... | pytorch-master | .circleci/generate_config_yml.py |
| pytorch-master | .circleci/cimodel/__init__.py |
| pytorch-master | .circleci/cimodel/lib/__init__.py |
from dataclasses import dataclass, field
from typing import Optional, Dict
def X(val):
"""
Compact way to write a leaf node
"""
return val, []
def XImportant(name):
"""Compact way to write an important (run on PRs) leaf node"""
return (name, [("important", [X(True)])])
@dataclass
class Ver... | pytorch-master | .circleci/cimodel/lib/conf_tree.py |
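The two leaf-node helpers in this row are shown in full, so the tuple shapes they produce can be demonstrated directly:

```python
def X(val):
    # Copied from the snippet above: a leaf node is (value, children).
    return val, []

def XImportant(name):
    return (name, [("important", [X(True)])])

print(X("3.7"))           # ('3.7', [])
print(XImportant("3.7"))  # ('3.7', [('important', [(True, [])])])
```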
def quote(s):
return sandwich('"', s)
def sandwich(bread, jam):
return bread + jam + bread
def override(word, substitutions):
return substitutions.get(word, word)
| pytorch-master | .circleci/cimodel/lib/miniutils.py |
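These three helpers are complete; a quick exercise of them:

```python
def sandwich(bread, jam):
    return bread + jam + bread

def quote(s):
    return sandwich('"', s)

def override(word, substitutions):
    return substitutions.get(word, word)

print(quote("hello"))                   # "hello" (with literal quotes)
print(override("cpu", {"cpu": "any"}))  # any
print(override("gpu", {"cpu": "any"}))  # gpu (no substitution found)
```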
from collections import OrderedDict
import cimodel.lib.miniutils as miniutils
LIST_MARKER = "- "
INDENTATION_WIDTH = 2
def is_dict(data):
return type(data) in [dict, OrderedDict]
def is_collection(data):
return is_dict(data) or type(data) is list
def render(fh, data, depth, is_list_member=False):
"... | pytorch-master | .circleci/cimodel/lib/miniyaml.py |
from cimodel.lib.conf_tree import ConfigNode
CONFIG_TREE_DATA = [
]
def get_major_pyver(dotted_version):
parts = dotted_version.split(".")
return "py" + parts[0]
class TreeConfigNode(ConfigNode):
def __init__(self, parent, node_name, subtree):
super(TreeConfigNode, self).__init__(parent, self.... | pytorch-master | .circleci/cimodel/data/pytorch_build_data.py |
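`get_major_pyver` is fully visible above; it keeps only the major version component:

```python
def get_major_pyver(dotted_version):
    # Copied from the snippet above.
    parts = dotted_version.split(".")
    return "py" + parts[0]

print(get_major_pyver("3.7.5"))  # py3
```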
from collections import OrderedDict
import cimodel.data.simple.util.branch_filters as branch_filters
import cimodel.data.binary_build_data as binary_build_data
import cimodel.lib.conf_tree as conf_tree
import cimodel.lib.miniutils as miniutils
class Conf(object):
def __init__(self, os, gpu_version, pydistro, parm... | pytorch-master | .circleci/cimodel/data/binary_build_definitions.py |
PHASES = ["build", "test"]
CUDA_VERSIONS = [
"102",
"113",
"116",
"117",
]
ROCM_VERSIONS = [
"4.3.1",
"4.5.2",
]
ROCM_VERSION_LABELS = ["rocm" + v for v in ROCM_VERSIONS]
GPU_VERSIONS = [None] + ["cuda" + v for v in CUDA_VERSIONS] + ROCM_VERSION_LABELS
STANDARD_PYTHON_VERSIONS = [
"3.7"... | pytorch-master | .circleci/cimodel/data/dimensions.py |
| pytorch-master | .circleci/cimodel/data/__init__.py |
from collections import OrderedDict
from dataclasses import dataclass, field
from typing import List, Optional
import cimodel.data.dimensions as dimensions
import cimodel.lib.conf_tree as conf_tree
import cimodel.lib.miniutils as miniutils
from cimodel.data.pytorch_build_data import CONFIG_TREE_DATA, TopLevelNode
from... | pytorch-master | .circleci/cimodel/data/pytorch_build_definitions.py |
"""
This module models the tree of configuration variants
for "smoketest" builds.
Each subclass of ConfigNode represents a layer of the configuration hierarchy.
These tree nodes encapsulate the logic for whether a branch of the hierarchy
should be "pruned".
"""
from collections import OrderedDict
from cimodel.lib.co... | pytorch-master | .circleci/cimodel/data/binary_build_data.py |
| pytorch-master | .circleci/cimodel/data/simple/__init__.py |
from cimodel.data.simple.util.versions import MultiPartVersion
import cimodel.lib.miniutils as miniutils
XCODE_VERSION = MultiPartVersion([12, 5, 1])
class ArchVariant:
def __init__(self, name, custom_build_name=""):
self.name = name
self.custom_build_name = custom_build_name
def render(self... | pytorch-master | .circleci/cimodel/data/simple/ios_definitions.py |
class MacOsJob:
def __init__(self, os_version, is_build=False, is_test=False, extra_props=tuple()):
        # extra_props is a tuple because mutable data structures are not
        # recommended as argument defaults.
self.os_version = os_version
self.is_build = is_build
self.is_test = is... | pytorch-master | .circleci/cimodel/data/simple/macos_definitions.py |
from collections import OrderedDict
from cimodel.data.simple.util.branch_filters import gen_filter_dict
from cimodel.lib.miniutils import quote
CHANNELS_TO_PRUNE = ["pytorch-nightly", "pytorch-test"]
PACKAGES_TO_PRUNE = "pytorch torchvision torchaudio torchtext ignite torchcsprng"
def gen_workflow_job(channel: str... | pytorch-master | .circleci/cimodel/data/simple/anaconda_prune_defintions.py |
"""
PyTorch Mobile PR builds (use linux host toolchain + mobile build options)
"""
import cimodel.lib.miniutils as miniutils
import cimodel.data.simple.util.branch_filters
class MobileJob:
def __init__(
self,
docker_image,
docker_requires,
variant_parts,
... | pytorch-master | .circleci/cimodel/data/simple/mobile_definitions.py |
import cimodel.data.simple.ios_definitions as ios_definitions
import cimodel.lib.miniutils as miniutils
class IOSNightlyJob:
def __init__(self,
variant,
is_full_jit=False,
is_upload=False):
self.variant = variant
self.is_full_jit = is_full_jit
... | pytorch-master | .circleci/cimodel/data/simple/nightly_ios.py |
from collections import OrderedDict
from cimodel.lib.miniutils import quote
from cimodel.data.simple.util.branch_filters import gen_filter_dict, RC_PATTERN
# NOTE: All hardcoded docker image builds have been migrated to GHA
IMAGE_NAMES = [
]
# This entry should be an element from the list above
# This should contai... | pytorch-master | .circleci/cimodel/data/simple/docker_definitions.py |
AWS_DOCKER_HOST = "308535385114.dkr.ecr.us-east-1.amazonaws.com"
def gen_docker_image(container_type):
return (
"/".join([AWS_DOCKER_HOST, "pytorch", container_type]),
f"docker-{container_type}",
)
def gen_docker_image_requires(image_name):
return [f"docker-{image_name}"]
DOCKER_IMAGE_BA... | pytorch-master | .circleci/cimodel/data/simple/util/docker_constants.py |
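`gen_docker_image` is shown in full; it pairs an ECR image URL with a requires-label (the container type below is a hypothetical example):

```python
AWS_DOCKER_HOST = "308535385114.dkr.ecr.us-east-1.amazonaws.com"

def gen_docker_image(container_type):
    # Copied from the snippet above.
    return (
        "/".join([AWS_DOCKER_HOST, "pytorch", container_type]),
        f"docker-{container_type}",
    )

image, label = gen_docker_image("pytorch-linux-bionic-py3.7")
print(image)  # 308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-bionic-py3.7
print(label)  # docker-pytorch-linux-bionic-py3.7
```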
| pytorch-master | .circleci/cimodel/data/simple/util/__init__.py |
NON_PR_BRANCH_LIST = [
"main",
"master",
r"/ci-all\/.*/",
r"/release\/.*/",
]
PR_BRANCH_LIST = [
r"/gh\/.*\/head/",
r"/pull\/.*/",
]
RC_PATTERN = r"/v[0-9]+(\.[0-9]+)*-rc[0-9]+/"
def gen_filter_dict(
branches_list=NON_PR_BRANCH_LIST,
tags_list=None
):
"""Generates a filter... | pytorch-master | .circleci/cimodel/data/simple/util/branch_filters.py |
class MultiPartVersion:
def __init__(self, parts, prefix=""):
self.parts = parts
self.prefix = prefix
def prefixed_parts(self):
"""
Prepends the first element of the version list
with the prefix string.
"""
if self.parts:
return [self.prefix +... | pytorch-master | .circleci/cimodel/data/simple/util/versions.py |
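This row truncates mid-return; below is a minimal sketch of how `prefixed_parts` plausibly completes, based on its docstring (the real method may differ in details):

```python
class MultiPartVersion:
    def __init__(self, parts, prefix=""):
        self.parts = parts
        self.prefix = prefix

    def prefixed_parts(self):
        # Sketch: prefix only the first component, stringify the rest.
        if self.parts:
            return [self.prefix + str(self.parts[0])] + [str(p) for p in self.parts[1:]]
        return [self.prefix]

print(MultiPartVersion([12, 5, 1], "ios-").prefixed_parts())  # ['ios-12', '5', '1']
```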
# Documentation: https://docs.microsoft.com/en-us/rest/api/azure/devops/build/?view=azure-devops-rest-6.0
import re
import json
import os
import sys
import requests
import time
AZURE_PIPELINE_BASE_URL = "https://aiinfra.visualstudio.com/PyTorch/"
AZURE_DEVOPS_PAT_BASE64 = os.environ.get("AZURE_DEVOPS_PAT_BASE64_SECRE... | pytorch-master | .circleci/scripts/trigger_azure_pipeline.py |
#!/usr/bin/env python3
import os
import sys
import yaml
# Need to import modules that lie on an upward-relative path
sys.path.append(os.path.join(sys.path[0], '..'))
import cimodel.lib.miniyaml as miniyaml
def regurgitate(depth, use_pyyaml_formatter=False):
data = yaml.safe_load(sys.stdin)
if use_pyyaml_f... | pytorch-master | .circleci/codegen_validation/normalize_yaml_fragment.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import distutils.command.clean
import shutil
import glob
import os
import subprocess
from setuptools import setup... | pytorch-master | functorch/setup.py |
import yaml
import csv
import torch
from collections import defaultdict
def get_ops_for_key(key):
# Needs modified PyTorch C++ code to work
if key is None:
ops = torch._C._dispatch_get_registrations_for_dispatch_key()
else:
ops = torch._C._dispatch_get_registrations_for_dispatch_key(key)
... | pytorch-master | functorch/op_analysis/gen_data.py |
import argparse
import concurrent.futures
import json
import logging
import os
import subprocess
import sys
import time
from enum import Enum
from typing import Any, List, NamedTuple, Optional, BinaryIO
IS_WINDOWS: bool = os.name == "nt"
def eprint(*args: Any, **kwargs: Any) -> None:
print(*args, file=sys.stder... | pytorch-master | functorch/tools/lint/black_linter.py |
"""
Initializer script that installs stuff to pip.
"""
import os
import argparse
import logging
import subprocess
import sys
import time
from typing import List
def run_command(args: List[str]) -> "subprocess.CompletedProcess[bytes]":
logging.debug("$ %s", " ".join(args))
start_time = time.monotonic()
tr... | pytorch-master | functorch/tools/lint/pip_init.py |
import argparse
import json
import logging
import os
import re
import subprocess
import sys
import time
from enum import Enum
from typing import Any, Dict, List, NamedTuple, Optional, Set, Pattern
IS_WINDOWS: bool = os.name == "nt"
def eprint(*args: Any, **kwargs: Any) -> None:
print(*args, file=sys.stderr, flu... | pytorch-master | functorch/tools/lint/flake8_linter.py |
# Owner(s): ["module: functorch"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import copy
from torch.testing._internal.common_utils import (
TestCase, r... | pytorch-master | functorch/test/test_eager_transforms.py |
# Owner(s): ["module: functorch"]
import torch
from functorch.compile import minifier
from functorch._src.compile_utils import get_placeholders, get_outputs
from functorch import make_fx
from torch.testing._internal.common_utils import TestCase, run_tests
class TestMinifier(TestCase):
def test_has_mul_minifier(s... | pytorch-master | functorch/test/test_minifier.py |
# Owner(s): ["module: functorch"]
import functorch
from unittest.mock import patch
import functools
from torch.testing._internal.common_utils import run_tests
import test_compile_cache
import test_pythonkey
def make_functionalize_fn(fn):
@functools.wraps(fn)
def _fn(*args, **kwargs):
with patch.objec... | pytorch-master | functorch/test/test_functionalize.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn
from functorch.dim import dims, dimlists, softmax, cat
import math
class Linea... | pytorch-master | functorch/test/attn_ft.py |
import torch
import copy
from torch.testing._internal.common_methods_invocations import op_db
from functorch_additional_op_db import additional_op_db
from enum import Enum
import functorch._src.top_operators_github_usage as top_ops
import pprint
import unittest
import enum
from torch.testing._internal.common_device_typ... | pytorch-master | functorch/test/discover_coverage.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn
import math
class BertSelfAttention(nn.Module):
def __init__(self, hidden_s... | pytorch-master | functorch/test/attn_positional.py |
import re
import torch
"""
Instructions:
1. pytest -n 8 test/test_vmap.py test/test_ops.py test/test_pythonkey.py > result.txt
2. python test/xfail_suggester.py
"""
with open('result.txt') as f:
lines = f.readlines()
failed = [line for line in lines if line.startswith('FAILED')]
p = re.compile('FAILED test/test... | pytorch-master | functorch/test/xfail_suggester.py |
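The compiled pattern is truncated in this row, so the regex below is an assumption about its shape; it demonstrates the FAILED-line parsing the docstring describes, on hypothetical pytest output:

```python
import re

lines = [  # hypothetical pytest output lines
    "FAILED test/test_vmap.py::TestVmapOperators::test_foo_cpu",
    "PASSED test/test_ops.py::TestCommon::test_bar",
]
failed = [line for line in lines if line.startswith('FAILED')]
p = re.compile(r'FAILED (test/\S+?)::(\S+)')  # assumed shape of the truncated pattern
for line in failed:
    m = p.match(line)
    if m:
        print(m.group(1), m.group(2))  # test/test_vmap.py TestVmapOperators::test_foo_cpu
```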
# Owner(s): ["module: functorch"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from torch.testing._internal.common_utils import TestCase, run_tests
import to... | pytorch-master | functorch/test/test_pythonkey.py |
from functools import partial
import itertools
import unittest
import torch
from torch.testing import \
(floating_types, floating_types_and, all_types_and_complex_and)
from torch.testing._internal.common_utils import make_tensor
from torch.testing._internal.common_methods_invocations import OpInfo, SampleInput, D... | pytorch-master | functorch/test/functorch_additional_op_db.py |
# Owner(s): ["module: functorch"]
import torch
import torch.nn as nn
import torch.fx as fx
from functorch import make_fx
from torch.nn import functional as F
from functorch.compile import memory_efficient_fusion
from functorch._src.compile_utils import fx_graph_cse
from torch.testing._internal.common_utils import Test... | pytorch-master | functorch/test/test_memory_efficient_fusion.py |
# Owner(s): ["module: functorch"]
import torch
import functorch
from torch.testing._internal.common_utils import run_tests, TestCase, IS_WINDOWS
import unittest
from functorch.compile import aot_function, nop
class TestCompileCache(TestCase):
def check(self, a, b, aot_fn, fn):
a_clone = a.clone().detac... | pytorch-master | functorch/test/test_compile_cache.py |
# Owner(s): ["module: functorch"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import itertools
from torch.testing._internal.common_utils import TestCase, r... | pytorch-master | functorch/test/test_ops.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import torch
import functorch
from functorch import vmap
import torch.utils._pytree as pytree
fr... | pytorch-master | functorch/test/common_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from functorch.dim import Tensor, Dim, dims, dimlists, stack, DimensionBindError, DimList
from attn_ft import Ber... | pytorch-master | functorch/test/test_dims.py |
# Owner(s): ["module: functorch"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import OrderedDict
from unittest.case import skipIf
from torch.tes... | pytorch-master | functorch/test/test_vmap.py |
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensi... | pytorch-master | functorch/docs/source/conf.py |
# This example was adapted from https://github.com/muhrin/milad
# It is licensed under the GPLv3 license. You can find a copy of it
# here: https://www.gnu.org/licenses/gpl-3.0.en.html .
import torch
from torch import nn
from torch.nn.functional import mse_loss
from functorch import jacrev, vmap
sigma = 0.5
epsilon ... | pytorch-master | functorch/examples/lennard_jones/lennard_jones.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Runs CIFAR10 training with differential privacy.
"""
import argparse
import logging
import shutil
import sys
from datetime import datetime, timedelta
import numpy as np
import torch
import torch.nn as nn
import torch.op... | pytorch-master | functorch/examples/dp_cifar10/cifar10_transforms.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Runs CIFAR10 training with differential privacy.
"""
import argparse
import logging
import shutil
import sys
from datetime import datetime, timedelta
import numpy as np
import torch
import torch.nn as nn
import torch.op... | pytorch-master | functorch/examples/dp_cifar10/cifar10_opacus.py |
import argparse
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from functorch import make_functional, grad_and_value, vmap, combine_state_for_ensemble
# Adapted from http://willwhitney.com/parallel-training-jax.html , which is a
# tutorial on Model Ensembling with JAX by Will Whitney.
#... | pytorch-master | functorch/examples/ensembling/parallel_train.py |
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless requi... | pytorch-master | functorch/examples/maml_omniglot/maml-omniglot-ptonly.py |
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless requi... | pytorch-master | functorch/examples/maml_omniglot/maml-omniglot-higher.py |
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless requi... | pytorch-master | functorch/examples/maml_omniglot/maml-omniglot-transforms.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ... | pytorch-master | functorch/examples/maml_omniglot/support/omniglot_loaders.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from functorch import make_functional
from functorch.compile import nnc_jit
import torch
import torch.nn as nn
im... | pytorch-master | functorch/examples/compilation/linear_train.py |
import timeit
from functorch.compile import compiled_module, tvm_compile
import torch.nn as nn
import torch
def nop(f, _):
return f
fw_compiler = tvm_compile(target='llvm', tuning_logfile='fw_keops')
bw_compiler = tvm_compile(target='llvm', tuning_logfile='bw_keops')
fw_compiler = nop
bw_compiler = nop
def ru... | pytorch-master | functorch/examples/compilation/fuse_module.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from functorch import grad, make_fx
from functorch.compile import nnc_jit
import torch
import time
def f(x):
... | pytorch-master | functorch/examples/compilation/simple_function.py |
from functorch.compile import aot_function, tvm_compile
import torch
import time
import torch.utils
a = torch.randn(2000, 1, 4, requires_grad=True)
b = torch.randn(1, 2000, 4)
def f(a):
return (a * b).sum(dim=0)
fw_compiler = tvm_compile(target='llvm', tuning_logfile='fw_keops')
bw_compiler = tvm_compile(targe... | pytorch-master | functorch/examples/compilation/eager_fusion.py |
# Eric Jang originally wrote an implementation of MAML in JAX
# (https://github.com/ericjang/maml-jax).
# We translated his implementation from JAX to PyTorch.
from functorch import grad, vmap, make_functional
import matplotlib.pyplot as plt
import math
import torch
import numpy as np
from torch import nn
from torch.n... | pytorch-master | functorch/examples/maml_regression/evjang_transforms_module.py |
# Eric Jang originally wrote an implementation of MAML in JAX
# (https://github.com/ericjang/maml-jax).
# We translated his implementation from JAX to PyTorch.
import matplotlib.pyplot as plt
import math
import torch
import numpy as np
from torch.nn import functional as F
import matplotlib as mpl
mpl.use('Agg')
def ... | pytorch-master | functorch/examples/maml_regression/evjang.py |
# Eric Jang originally wrote an implementation of MAML in JAX
# (https://github.com/ericjang/maml-jax).
# We translated his implementation from JAX to PyTorch.
from functorch import grad, vmap
import matplotlib.pyplot as plt
import math
import torch
import numpy as np
from torch.nn import functional as F
import matplo... | pytorch-master | functorch/examples/maml_regression/evjang_transforms.py |
import pandas
import matplotlib.pyplot as plt
df = pandas.read_csv("perf.csv")
ops = pandas.unique(df["operator"])
nops = len(ops)
pivot_op_shape = df.pivot_table(values="time", index=["operator", "shape"], columns=["fuser"])
pivot_speedups = (pivot_op_shape.T / pivot_op_shape["eager"]).T
plt.rcParams["figure.figsiz... | pytorch-master | functorch/benchmarks/process_scorecard.py |
#!/usr/bin/env python3
import argparse
import os
import logging
import pandas as pd
from functorch._src.benchmark_utils import compute_utilization
# processes the chrome traces output by the pytorch profiler
# requires the json input file's name to be in the format {model_name}_chrome_trace_*.json
# the runtimes file shoul... | pytorch-master | functorch/benchmarks/chrome_trace_parser.py |
import sys
import time
import torch
import inspect
import itertools
from functorch import pointwise_operator
torch.set_num_threads(1)
torch._C._debug_set_fusion_group_inlining(False)
def rand(*shape):
return torch.rand(*shape).mul(16).add(1)
# -------------------------------------------------------------------... | pytorch-master | functorch/benchmarks/pointwise_scorecard.py |
import torch
import torch.nn as nn
import torchvision.models as models
from opacus.utils.module_modification import convert_batchnorm_modules
import time
from functorch import vmap, grad
from functorch import make_functional
from opacus import PrivacyEngine
device = 'cuda'
batch_size = 128
torch.manual_seed(0)
model... | pytorch-master | functorch/benchmarks/per_sample_grads.py |
import torch
import torch.fx as fx
from functorch import make_fx
from torch.profiler import profile, ProfilerActivity
from functorch._src.compile_utils import fx_graph_cse
def profile_it(f, inp):
for _ in range(5):
f(inp)
itr = 5
with profile(activities=[ProfilerActivity.CUDA], record_shapes=True... | pytorch-master | functorch/benchmarks/cse.py |
from functools import partial
import numpy as np
import pandas as pd
import timeit
import torch
from functorch.compile import pointwise_operator
WRITE_CSV = False
CUDA = False
SIZES = [1, 512, 8192]
NUMBER = [100, 10, 1, 1]
REPEAT = 20
@pointwise_operator
def nnc_add(a, b):
return a + b
@pointwise_operator
def... | pytorch-master | functorch/benchmarks/operator_authoring.py |
import torch
from functorch.compile import memory_efficient_fusion, clear_compile_cache
import benchmark_helper
device = "cuda"
dtype = torch.float16
# LightSeq pattern 1
class DropoutResBias:
@staticmethod
def fn(input, bias, residual):
a = torch.add(input, bias)
b = torch.nn.functional.drop... | pytorch-master | functorch/benchmarks/transformer_fusion_patterns/benchmark.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| pytorch-master | functorch/benchmarks/transformer_fusion_patterns/__init__.py |
import torch
from torch.profiler import profile, record_function, ProfilerActivity
from torch.utils.benchmark import Timer
import time
def profile_cuda_kernels(fn, args, string_id="Model time"):
print("################################################")
print(f"#### Profiling for {string_id} starts #########")... | pytorch-master | functorch/benchmarks/transformer_fusion_patterns/benchmark_helper.py |
import torch
from functorch.compile import memory_efficient_pointwise_fusion, clear_compile_cache
import benchmark_helper
# ALL comments regarding the patterns
def bias_gelu_dropout(input, bias):
a = torch.add(input, bias)
b = torch.nn.functional.gelu(a)
c = torch.nn.functional.dropout(b, p=0.6, training... | pytorch-master | functorch/benchmarks/transformer_fusion_patterns/bias_gelu_dropout.py |
"""
==========================
Per-sample-gradients
==========================
What is it?
--------------------------------------------------------------------
Per-sample-gradient computation is computing the gradient for each and every
sample in a batch of data. It is a useful quantity in differential privacy
and opt... | pytorch-master | functorch/notebooks/_src/plot_per_sample_gradients.py |
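The docstring's idea in a minimal runnable form, using the functorch API this repo exports (`make_functional`, `vmap`, `grad`); the model and data below are placeholders:

```python
import torch
from functorch import make_functional, vmap, grad

model = torch.nn.Linear(3, 1)
fmodel, params = make_functional(model)

def compute_loss(params, sample, target):
    # A single sample is treated as a batch of one for the functional model.
    out = fmodel(params, sample.unsqueeze(0))
    return torch.nn.functional.mse_loss(out, target.unsqueeze(0))

samples, targets = torch.randn(8, 3), torch.randn(8, 1)
# vmap over the batch dimension of (samples, targets), sharing params.
per_sample_grads = vmap(grad(compute_loss), in_dims=(None, 0, 0))(params, samples, targets)
print([g.shape for g in per_sample_grads])  # one grad per parameter, leading dim 8
```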
"""
==========================
Model ensembling
==========================
This example illustrates how to vectorize model ensembling using vmap.
What is model ensembling?
--------------------------------------------------------------------
Model ensembling combines the predictions from multiple models together.
Tradi... | pytorch-master | functorch/notebooks/_src/plot_ensembling.py |
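The vectorized-ensemble pattern the docstring refers to, sketched with placeholder models:

```python
import torch
from functorch import combine_state_for_ensemble, vmap

models = [torch.nn.Linear(4, 2) for _ in range(5)]
fmodel, params, buffers = combine_state_for_ensemble(models)

minibatch = torch.randn(8, 4)
# Batch over the stacked params/buffers; share the same minibatch.
predictions = vmap(fmodel, in_dims=(0, 0, None))(params, buffers, minibatch)
print(predictions.shape)  # torch.Size([5, 8, 2])
```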
"""
=============================
Jacobians, hessians, and more
=============================
Computing jacobians or hessians is useful in a number of non-traditional
deep learning models. It is difficult (or annoying) to compute these quantities
efficiently using a standard autodiff system like PyTorch Autograd; fun... | pytorch-master | functorch/notebooks/_src/plot_jacobians_and_hessians.py |
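A small concrete case of the reverse-mode Jacobian this docstring discusses:

```python
import torch
from functorch import jacrev

x = torch.randn(3)
jac = jacrev(torch.sin)(x)
# The Jacobian of elementwise sin is a diagonal matrix of cosines.
assert torch.allclose(jac, torch.diag(torch.cos(x)))
```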
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
from . import _C
# Monkey patch PyTorch. This is a hack; we should try to upstream
# these pieces.
f... | pytorch-master | functorch/functorch/__init__.py |
from .batch_norm_replacement import replace_all_batch_norm_modules_
# PyTorch forward-mode is not mature yet
from .._src.eager_transforms import jvp, jacfwd, hessian, functionalize
from .._src.vmap import chunk_vmap
| pytorch-master | functorch/functorch/experimental/__init__.py |
import torch.nn as nn
def batch_norm_without_running_stats(module: nn.Module):
if isinstance(module, nn.modules.batchnorm._BatchNorm) and module.track_running_stats:
module.running_mean = None
module.running_var = None
module.num_batches_tracked = None
module.track_running_stats = ... | pytorch-master | functorch/functorch/experimental/batch_norm_replacement.py |
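`batch_norm_without_running_stats` is shown nearly in full above; below is a minimal sketch of the exported wrapper, assuming it simply applies the patch across a module tree (the real implementation may differ):

```python
import torch.nn as nn

def batch_norm_without_running_stats(module: nn.Module):
    # Copied from the snippet above (final assignment completed).
    if isinstance(module, nn.modules.batchnorm._BatchNorm) and module.track_running_stats:
        module.running_mean = None
        module.running_var = None
        module.num_batches_tracked = None
        module.track_running_stats = False

def replace_all_batch_norm_modules_(root: nn.Module) -> nn.Module:
    # Sketch: patch the root and every submodule in place.
    for m in root.modules():
        batch_norm_without_running_stats(m)
    return root
```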
import torch
import torch.fx as fx
import operator
import math
import torch.utils._pytree as pytree
import copy
import os
from collections import defaultdict
from torch.fx.passes import graph_drawer
from typing import Tuple
from .compile_utils import fx_graph_cse, get_aten_target
from . import config
AOT_PARTITIONER_D... | pytorch-master | functorch/functorch/_src/partitioners.py |
import torch
import torch.fx as fx
from torch.utils._pytree import tree_flatten
aten = torch.ops.aten
def get_aten_target(node):
if hasattr(node.target, 'overloadpacket'):
return node.target.overloadpacket
return node.target
rand_ops = [aten.dropout, aten._fused_dropout, aten._standard_gamma,
... | pytorch-master | functorch/functorch/_src/compile_utils.py |
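`get_aten_target` normalizes a specific ATen overload back to its overload packet; the same logic applied directly to a raw target for illustration (the real helper takes an `fx.Node`):

```python
import torch

aten = torch.ops.aten

def get_aten_target_of(target):
    # Same logic as the snippet above, but on a raw target.
    if hasattr(target, 'overloadpacket'):
        return target.overloadpacket
    return target

print(get_aten_target_of(aten.add.Tensor))  # aten.add (the overload packet)
print(get_aten_target_of(len))              # non-op targets pass through unchanged
```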
# Polyfilled from pytorch core while we figure out the `remove_duplicate` issues.
def _named_members(mod, get_members_fn, prefix='', recurse=True, remove_duplicate=True):
r"""Helper method for yielding various names + members of modules."""
memo = set()
modules = mod.named_modules(prefix=prefix, remove_dupl... | pytorch-master | functorch/functorch/_src/named_members_polyfill.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
Global flags for aot autograd
"""
import os
use_functionalize = False
# TODO: flip this to true by default
... | pytorch-master | functorch/functorch/_src/config.py |
import torch
from torch import Tensor
import torch._decomp
from typing import Tuple, List, Optional
aten = torch.ops.aten
decomposition_table = torch._decomp.decomposition_table
register_decomposition = torch._decomp.register_decomposition
get_decompositions = torch._decomp.get_decompositions
# Decompositions have b... | pytorch-master | functorch/functorch/_src/decompositions.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
__all__ = ["make_fx", "ProxyTensor", "dispatch_trace", "PythonKeyTracer", "pythonkey_decompose"]
from torch.fx.exp... | pytorch-master | functorch/functorch/_src/python_key.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from torch.utils._pytree import tree_flatten, tree_unflatten
def tree_map_(fn_, pytree):
flat_args, _ = tre... | pytorch-master | functorch/functorch/_src/pytree_hacks.py |
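`tree_map_` is truncated mid-body; a minimal sketch of the plausible completion, applying `fn_` to each leaf for side effects only:

```python
from torch.utils._pytree import tree_flatten

def tree_map_(fn_, pytree):
    # Sketch: flatten, visit every leaf, return the original tree unchanged.
    flat_args, _ = tree_flatten(pytree)
    for arg in flat_args:
        fn_(arg)
    return pytree

tree_map_(print, {"a": 1, "b": [2, 3]})  # prints 1, 2, 3
```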
"""
From https://docs.google.com/spreadsheets/d/12R3nCOLskxPYjjiNkdqy4OdQ65eQp_htebXGODsjSeA/edit#gid=0
Try to keep this list in sync with that.
"""
top_torch = [
("t", 6837449),
("tensor", 585786),
("mode", 462182),
("cat", 394818),
("max", 368038),
("zeros", 329495),
("load", 327756),
... | pytorch-master | functorch/functorch/_src/top_operators_github_usage.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| pytorch-master | functorch/functorch/_src/__init__.py |
import torch
import functorch._C
m = functorch._C._dispatch_library("FRAGMENT", "aten", "")
def custom_vjp(name, filter_fn, fwd_fn, bwd_fn):
m.def_(f"{name}(Tensor[] args) -> Tensor[]")
m.impl(f"{name}", "CompositeImplicitAutograd", fwd_fn)
m.def_(f"{name}_vjp(Tensor[] args) -> Tensor[]")
m.impl(f"{... | pytorch-master | functorch/functorch/_src/custom_function.py |
import torch.fx as fx
import copy
import torch
import math
from typing import Callable, List
from functools import wraps, partial
from dataclasses import dataclass
from .compile_utils import get_placeholders, get_outputs
class ConcreteProp(torch.fx.Interpreter):
def run_node(self, n):
result = super().run_... | pytorch-master | functorch/functorch/_src/fx_minifier.py |
import torch
import functorch._C as _C
import functools
# Monkeypatch tensor printing in pytorch
_old_str = torch._tensor_str._str
def prep_value(text, indent=4):
first_line_txt = ''
lines = text.split('\n')
lines[0] = lines[0]
lines[0] = ' ' * indent + first_line_txt + lines[0]
for i in range(1,... | pytorch-master | functorch/functorch/_src/monkey_patching.py |
import dataclasses
import warnings
from contextlib import contextmanager, nullcontext
from functools import wraps
from typing import Any, Callable, Dict, List, Optional, Tuple
import torch
import torch.fx.traceback as fx_traceback
import torch.nn as nn
import torch.utils._pytree as pytree
import torch.utils.dlpack
fro... | pytorch-master | functorch/functorch/_src/aot_autograd.py |
import copy
import logging
import os
import pickle
import random
from functools import partial
from typing import Callable, Optional, Tuple, Union
import torch
import torch.fx as fx
import torch.nn as nn
from .aot_autograd import aot_function, aot_module, make_boxed_compiler
from .compile_utils import strip_overloads... | pytorch-master | functorch/functorch/_src/compilers.py |