python_code: string, lengths 0 to 258k (one truncated source file per row below)
## @package generator # Module caffe2.python.docs.generator from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import os from caffe2.python import core, workspace from caffe2.python.docs.formatter import Markdown OpSchema...
## @package github # Module caffe2.python.docs.github from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from caffe2.python.docs.formatter import Markdown from caffe2.python.docs.generator import OpDocGenerator, DocUploade...
## @package convnet_benchmarks # Module caffe2.python.convnet_benchmarks from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals """ Benchmark for common convnets. Speed on Titan X, with 10 warmup steps and 10 main steps and w...
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import unittest import hypothesis.strategies as st from hypothesis import given, settings import numpy as np from caffe2.python import core, workspace import caffe2.pytho...
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import unittest import numpy as np from caffe2.proto import caffe2_pb2 from caffe2.python import cnn, core, workspace, test_util @unittest.skipIf(not workspace.C.has_mk...
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import unittest import numpy as np from caffe2.proto import caffe2_pb2 from caffe2.python import cnn, core, workspace, test_util @unittest.skipIf(not workspace.C.has_mk...
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import unittest import hypothesis.strategies as st from hypothesis import given, settings import numpy as np from caffe2.python import core, workspace import caffe2.pytho...
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import unittest import hypothesis.strategies as st from hypothesis import given, settings import numpy as np from caffe2.python import core, workspace import caffe2.pytho...
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import unittest import hypothesis.strategies as st from hypothesis import given, settings import numpy as np from caffe2.python import core, workspace import caffe2.pytho...
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import unittest import numpy as np from caffe2.proto import caffe2_pb2 from caffe2.python import cnn, core, workspace, test_util @unittest.skipIf(not workspace.C.has_mk...
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import unittest import numpy as np from caffe2.proto import caffe2_pb2 from caffe2.python import cnn, core, workspace, test_util @unittest.skipIf(not workspace.C.has_mk...
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import unittest import hypothesis.strategies as st from hypothesis import given, settings import numpy as np from caffe2.python import core, workspace import caffe2.pytho...
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import unittest import numpy as np from caffe2.proto import caffe2_pb2 from caffe2.python import cnn, core, workspace, test_util @unittest.skipIf(not workspace.C.has_mk...
## @package lmdb_create_example # Module caffe2.python.examples.lmdb_create_example from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import argparse import numpy as np import lmdb from caffe2.proto import caffe2_pb2 fr...
## @package resnet50_trainer # Module caffe2.python.examples.resnet50_trainer from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import argparse import logging import numpy as np import time import os from caffe2.python ...
## @package char_rnn # Module caffe2.python.examples.char_rnn from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from caffe2.python import core, workspace, model_helper, utils, brew from caffe2.python.rnn_cell import LSTM...
## @package helpers # Module caffe2.python.tutorials.helpers import numpy as np import skimage.io import skimage.transform import urllib2 def crop_center(img,cropx,cropy): y,x,c = img.shape startx = x//2-(cropx//2) starty = y//2-(cropy//2) return img[starty:starty+cropy,startx:startx+cropx] def rescal...
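A minimal usage sketch of the crop_center helper shown in the row above (the array shape and crop size here are illustrative, not from the file):

import numpy as np

def crop_center(img, cropx, cropy):
    # Body reproduced from the row above: take the central cropx-by-cropy patch.
    y, x, c = img.shape
    startx = x // 2 - (cropx // 2)
    starty = y // 2 - (cropy // 2)
    return img[starty:starty + cropy, startx:startx + cropx]

img = np.random.rand(256, 256, 3)      # stand-in for a decoded image
patch = crop_center(img, 224, 224)
assert patch.shape == (224, 224, 3)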
## @package predictor_py_utils # Module caffe2.python.predictor.predictor_py_utils from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from caffe2.python import core def create_predict_net(predictor_export_meta): """...
## @package predictor_exporter # Module caffe2.python.predictor.predictor_exporter from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from caffe2.proto import caffe2_pb2 from caffe2.proto import metanet_pb2 from caffe2.py...
## @package serde # Module caffe2.python.predictor.serde from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals def serialize_protobuf_struct(protobuf_struct): return protobuf_struct.SerializeToString() def deserialize...
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from caffe2.python.test_util import TestCase from caffe2.python import workspace, brew from caffe2.python.model_helper import ModelHelper from caffe2.python.predictor impo...
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import tempfile import unittest import numpy as np from caffe2.python import cnn, workspace, core from caffe2.python.predictor_constants import predictor_constants as pc...
## @package mobile_exporter # Module caffe2.python.mobile_exporter from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from caffe2.python import core, utils from caffe2.proto import caffe2_pb2 def Export(workspace, net, ...
## @package fc # Module caffe2.python.helpers.fc from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from caffe2.python import core def _FC_or_packed_FC( model, op_call, blob_in, blob_out, dim_in, dim_out, weight_ini...
## @package algebra # Module caffe2.python.helpers.algebra from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals def transpose(model, blob_in, blob_out, use_cudnn=False, **kwargs): """Transpose.""" if use_cudnn: ...
## @package tools # Module caffe2.python.helpers.tools from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals def image_input( model, blob_in, blob_out, order="NCHW", use_gpu_transform=False, **kwargs ): if order == ...
## @package pooling # Module caffe2.python.helpers.pooling from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals def max_pool(model, blob_in, blob_out, use_cudnn=False, ...
## @package array_helpers # Module caffe2.python.helpers.array_helpers from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals def concat(model, blobs_in, blob_out, order="NCHW", **kwargs): """Depth Concat.""" return m...
## @package nonlinearity # Module caffe2.python.helpers.nonlinearity from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from caffe2.python import core def prelu(model, blob_in, blob_out, num_channels=1, slope_init=None,...
## @package train # Module caffe2.python.helpers.train from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from caffe2.python import core, scope from caffe2.proto import caffe2_pb2 def _get_weights(model, namescope=None)...
## @package dropout # Module caffe2.python.helpers.dropout from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals def dropout(model, blob_in, blob_out, use_cudnn=False, **kwargs): """dropout""" if use_cudnn: ...
## @package conv # Module caffe2.python.helpers.conv from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from caffe2.python import core def _ConvBase( model, is_nd, blob_in, blob_out, dim_in, dim_...
from __future__ import absolute_import from __future__ import division from __future__ import print_function import contextlib import copy import threading _threadlocal_scope = threading.local() @contextlib.contextmanager def arg_scope(single_helper_or_list, **kwargs): global _threadlocal_scope if not isinst...
## @package normalization # Module caffe2.python.helpers.normalization from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from caffe2.python import core, scope from caffe2.proto import caffe2_pb2 def lrn(model, blob_in,...
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from caffe2.proto import caffe2_pb2 from caffe2.python import workspace, core, lstm_benchmark, utils from copy import copy @utils.debug def Compare(args): results = [...
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import os import uuid from caffe2.distributed.store_ops_test_util import StoreOpsTests from caffe2.python import core, workspace, dyndep from caffe2.python.test_util imp...
## @package store_ops_test_util # Module caffe2.distributed.store_ops_test_util from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from multiprocessing import Process, Queue import numpy as np from caffe2.python import ...
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import errno import os import tempfile import shutil from caffe2.distributed.store_ops_test_util import StoreOpsTests from caffe2.python import core, workspace, dyndep f...
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import numpy as np from scipy.sparse import coo_matrix from hypothesis import given import hypothesis.strategies as st from caffe2.python import core import caffe2.pyth...
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import numpy as np from hypothesis import given import hypothesis.strategies as st from caffe2.python import core, workspace import caffe2.python.hypothesis_test_util a...
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import numpy as np from scipy.sparse import coo_matrix from hypothesis import given import hypothesis.strategies as st from caffe2.python import core import caffe2.pyth...
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import numpy as np from scipy.sparse import coo_matrix from caffe2.python import core, workspace from caffe2.python.test_util import TestCase def test_reshape(old_shap...
## @package convnet_benchmarks # Module caffe2.experiments.python.convnet_benchmarks from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals """ Benchmark for common convnets. (NOTE: Numbers below prior with missing parameter=...
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import numpy as np from hypothesis import given import hypothesis.strategies as st from caffe2.python import core, workspace import caffe2.python.hypothesis_test_util a...
## @package SparseTransformer # Module caffe2.experiments.python.SparseTransformer from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from caffe2.python import workspace import scipy.sparse class NetDefNode(): def _...
## @package net_construct_bench # Module caffe2.experiments.python.net_construct_bench from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import argparse import logging import time from caffe2.python import workspace, da...
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import unittest import hypothesis.strategies as st from hypothesis import given, assume, settings import numpy as np import time import os from caffe2.python import core,...
from __future__ import absolute_import from __future__ import division from __future__ import print_function import unittest import numpy as np from caffe2.proto import caffe2_pb2 from caffe2.python import core, workspace, dyndep, test_util dyndep.InitOpsLibrary('@/caffe2/caffe2/contrib/warpctc:ctc_ops') workspace.G...
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import unittest import hypothesis.strategies as st from hypothesis import given, assume import numpy as np import time import os from caffe2.proto import caffe2_pb2 from ...
#!/usr/bin/env python from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from hypothesis import given import hypothesis.strategies as st from multiprocessing import Process, Queue import numpy as np import os import pic...
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from caffe2.python import core, dyndep import caffe2.python.hypothesis_test_util as hu from hypothesis import given import hypothesis.strategies as st import numpy as np...
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from caffe2.python import core, dyndep import caffe2.python.hypothesis_test_util as hu from hypothesis import given import hypothesis.strategies as st import numpy as np...
## @package utils # Module caffe2.contrib.perf_contbld.utils from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import getpass import time from collections import defaultdict import numpy as np from caffe2.proto import p...
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import unittest from caffe2.proto import caffe2_pb2 from caffe2.python import core, dyndep, workspace dyndep.InitOpsLibrary("@/caffe2/caffe2/contrib/prof:cuda_profile_op...
## @package htrace_to_chrome # Module caffe2.contrib.prof.htrace_to_chrome from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import argparse import json import re import sys display_levels = ["network", "worker", "opera...
## @package process # Module doxygen.process # Script to insert preamble for doxygen and regen API docs import glob, os, shutil # Module caffe2...caffe2.python.control_test def insert(originalfile,first_line,description): with open(originalfile,'r') as f: f1 = f.readline() if(f1.find(first_line)<0...
## @package diagnose_protobuf # Module scripts.diagnose_protobuf """Diagnoses the current protobuf situation. Protocol buffer needs to be properly installed for Caffe2 to work, and sometimes it is rather tricky. Specifically, we will need to have a consistent version between C++ and python simultaneously. This is a co...
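A hedged sketch of the kind of consistency check the docstring describes, comparing the protobuf version visible to Python against the protoc binary (the exact checks in the real script may differ):

import subprocess

try:
    import google.protobuf
    python_version = google.protobuf.__version__
except ImportError:
    python_version = None

try:
    out = subprocess.check_output(['protoc', '--version'])   # e.g. b'libprotoc 3.6.1'
    native_version = out.decode().split()[-1]
except (OSError, subprocess.CalledProcessError):
    native_version = None

if python_version and native_version and python_version != native_version:
    print('Mismatched protobuf versions: python', python_version, 'vs protoc', native_version)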
## @package get_python_cmake_flags # Module scripts.get_python_cmake_flags ############################################################################## # Use this script to find your preferred python installation. ############################################################################## # # You can use the follo...
import torch from setuptools import setup, find_packages import subprocess import sys if not torch.cuda.is_available(): print("\nWarning: Torch did not find available GPUs on this system.\n", "If your intention is to cross-compile, this is not an error.\n") print("torch.__version__ = ", torch.__versio...
# May help avoid undefined symbol errors https://pytorch.org/cppdocs/notes/faq.html#undefined-symbol-errors-from-pytorch-aten import torch from . import parallel from . import amp from . import fp16_utils # For optimizers and normalization there is no Python fallback. # Absence of cuda backend is a hard error. # I wo...
import torch from torch.nn.modules.batchnorm import _BatchNorm from torch.nn import functional as F import syncbn from .optimized_sync_batchnorm_kernel import SyncBatchnormFunction class SyncBatchNorm(_BatchNorm): """ synchronized batch normalization module extended from `torch.nn.BatchNormNd` with the a...
import torch from torch.autograd.function import Function from apex.parallel import ReduceOp class SyncBatchnormFunction(Function): @staticmethod def forward(ctx, input, weight, bias, running_mean, running_variance, eps, process_group, world_size): torch.cuda.nvtx.range_push("sync_BN_fw") # ...
import torch if hasattr(torch.distributed, 'ReduceOp'): ReduceOp = torch.distributed.ReduceOp elif hasattr(torch.distributed, 'reduce_op'): ReduceOp = torch.distributed.reduce_op else: ReduceOp = torch.distributed.deprecated.reduce_op from .distributed import DistributedDataParallel, Reducer # This is tri...
import torch from torch.nn.modules.batchnorm import _BatchNorm from torch.nn import functional as F from .sync_batchnorm_kernel import SyncBatchnormFunction from apex.parallel import ReduceOp class SyncBatchNorm(_BatchNorm): """ synchronized batch normalization module extended from ``torch.nn.BatchNormNd`` ...
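A usage sketch, assuming apex is installed so the layer is importable as apex.parallel.SyncBatchNorm; per the docstring it is a drop-in for torch.nn.BatchNormNd, and in eval mode (or outside a distributed job) it reduces to ordinary batch norm:

import torch
from apex.parallel import SyncBatchNorm

bn = SyncBatchNorm(64).eval()        # eval mode falls back to plain F.batch_norm
x = torch.randn(8, 64, 32, 32)       # NCHW input with 64 channels
y = bn(x)
# In a torch.distributed training job, the training-mode forward all-reduces
# per-process statistics so every replica normalizes with the same mean/var.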
import torch import torch.distributed as dist from torch.nn.modules import Module from torch.autograd import Variable from collections import OrderedDict from itertools import chain import copy import importlib from ..multi_tensor_apply import multi_tensor_applier imported_flatten_impl = False def import_flatten_impl...
import torch from torch.autograd.function import Function import syncbn from apex.parallel import ReduceOp class SyncBatchnormFunction(Function): @staticmethod def forward(ctx, input, weight, bias, running_mean, running_variance, eps, track_running_stats = True, momentum = 1.0, process_group = None, channel_...
import torch from torch import nn from torch.autograd import Variable from torch.nn.parameter import Parameter class LARC(object): """ :class:`LARC` is a pytorch implementation of both the scaling and clipping variants of LARC, in which the ratio between gradient and parameter magnitudes is used to calcula...
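The docstring is truncated where it describes the gradient-to-parameter ratio; a rough sketch of the LARC trust-ratio computation as commonly formulated (the function and constants here are illustrative, not the file's actual code):

import torch

def larc_local_lr(param, trust_coefficient=0.02, eps=1e-8, clip=True, global_lr=0.1):
    # Scaling variant: per-layer lr proportional to ||w|| / ||grad(w)||.
    param_norm = param.detach().norm()
    grad_norm = param.grad.detach().norm()
    local_lr = trust_coefficient * param_norm / (grad_norm + eps)
    if clip:
        # Clipping variant: the adaptive lr never exceeds the global lr.
        local_lr = torch.clamp(local_lr / global_lr, max=1.0) * global_lr
    return local_lr

w = torch.nn.Parameter(torch.randn(10, 10))
w.grad = torch.randn(10, 10)
print(larc_local_lr(w))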
import torch import sys import subprocess def docstring_hack(): """ Multiproc file which will launch a set of processes locally for multi-gpu usage: python -m apex.parallel.multiproc main.py ... """ pass argslist = list(sys.argv)[1:] world_size = torch.cuda.device_count() if '--world-size' in arg...
import math import torch import numbers from torch.nn.parameter import Parameter from torch.nn import init from torch.nn import functional as F import importlib class FusedLayerNormAffineFunction(torch.autograd.Function): def __init__(self, normalized_shape, eps=1e-6): global fused_layer_norm_cuda fused_laye...
from .fused_layer_norm import FusedLayerNorm
from .fp16util import ( BN_convert_float, network_to_half, prep_param_lists, model_grads_to_master_grads, master_params_to_model_params, tofp16, to_python_float, clip_grad_norm, convert_module, convert_network, FP16Model, ) from .fp16_optimizer import FP16_Optimizer from .lo...
import torch import torch.nn as nn from torch.autograd import Variable from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors class tofp16(nn.Module): """ Utility module that implements:: def forward(self, input): return input.half() """ def __init__(self): ...
import torch from torch import nn from torch.autograd import Variable from torch.nn.parameter import Parameter from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors from ..amp._amp_state import _amp_state, maybe_print from ..amp.scaler import LossScaler from ..multi_tensor_apply import multi_tensor...
import torch # item() is a recent addition, so this helps with backward compatibility. def to_python_float(t): if hasattr(t, 'item'): return t.item() else: return t[0] class LossScaler: """ Class that manages a static loss scale. This class is intended to interact with :class:`FP1...
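The docstring describes static loss scaling; a minimal sketch of the technique itself (fp32 here for simplicity; the point of the scale is to keep small fp16 gradients representable):

import torch

loss_scale = 128.0                                   # fixed scale a LossScaler would hold
model = torch.nn.Linear(4, 1)
opt = torch.optim.SGD(model.parameters(), lr=0.01)

loss = model(torch.randn(2, 4)).sum()
(loss * loss_scale).backward()                       # scale up before backward
for p in model.parameters():
    p.grad.div_(loss_scale)                          # unscale before the optimizer step
opt.step()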
from .multi_tensor_apply import MultiTensorApply multi_tensor_applier = MultiTensorApply(2048*32)
import torch class MultiTensorApply(object): available = False warned = False def __init__(self, chunk_size): try: import amp_C MultiTensorApply.available = True self.chunk_size = chunk_size except ImportError as err: MultiTensorApply.availab...
import types import torch import importlib class FusedAdam(torch.optim.Optimizer): """Implements Adam algorithm. Currently GPU-only. Requires Apex to be installed via ``python setup.py install --cuda_ext --cpp_ext``. It has been proposed in `Adam: A Method for Stochastic Optimization`_. Arguments: ...
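Per the docstring, FusedAdam is GPU-only and needs apex built with --cuda_ext --cpp_ext; assuming such a build, usage mirrors torch.optim.Adam:

import torch
from apex.optimizers import FusedAdam

model = torch.nn.Linear(10, 10).cuda()
optimizer = FusedAdam(model.parameters(), lr=1e-3)

loss = model(torch.randn(4, 10, device='cuda')).sum()
loss.backward()
optimizer.step()
optimizer.zero_grad()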
from .fused_adam import FusedAdam from .fp16_optimizer import FP16_Optimizer
import torch from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors class FP16_Optimizer(object): """ :class:`FP16_Optimizer` A cut-down version of apex.fp16_utils.FP16_Optimizer. Designed only to wrap apex.optimizers.FusedAdam. Refer to the apex.fp16_utils documentation for more information....
from .weight_norm import WeightNorm from .reparameterization import Reparameterization def apply_weight_norm(module, name='', dim=0, hook_child=True): """ Applies weight normalization to a parameter in the given module. If no parameter is provided, applies weight normalization to all parameters in mode...
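Using the signature shown above (and assuming the package import path apex.reparameterization), a small usage sketch; note the docstring says an empty name applies weight norm to every parameter in the module:

import torch
from apex.reparameterization import apply_weight_norm

layer = torch.nn.Linear(16, 16)
apply_weight_norm(layer, name='weight', dim=0)   # reparameterize weight as g * v / ||v||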
import torch from torch.nn.parameter import Parameter from ..fp16_utils import Fused_Weight_Norm import time from .reparameterization import Reparameterization def _norm(p, dim): """Computes the norm over all dimensions except dim""" if dim is None: return p.norm() elif dim == 0: output_si...
import torch from torch.nn.parameter import Parameter import sys class Reparameterization(object): """ Class interface for performing weight reparameterizations Arguments: name (str): name of weight parameter dim (int): dimension over which to compute the norm module (nn.Module): par...
import types from ..fp16_utils import master_params_to_model_params from ..multi_tensor_apply import multi_tensor_applier from ._amp_state import maybe_print import torch class AmpOptimizerState(object): def __init__(self): pass def lazy_init_with_master_weights(self): stash = self._amp_stash ...
import torch # True for post-0.4, when Variables/Tensors merged. def variable_is_tensor(): v = torch.autograd.Variable() return isinstance(v, torch.Tensor) def tensor_is_variable(): x = torch.Tensor() return type(x) == torch.autograd.Variable # False for post-0.4 def tensor_is_float_tensor(): x =...
import contextlib import warnings import torch from . import utils from .opt import OptimWrapper from .scaler import LossScaler from ._amp_state import _amp_state, master_params, maybe_print from ..fp16_utils import FP16_Optimizer as FP16_Optimizer_general from ..optimizers import FP16_Optimizer as FP16_Optimizer_for_...
import torch from torch._six import string_classes import functools import numpy as np import warnings from ._amp_state import _amp_state, warn_or_err, container_abcs from .handle import disable_casts from .scaler import LossScaler from ._process_optimizer import _process_optimizer from apex.fp16_utils import convert_n...
from . import compat, rnn_compat, utils, wrap from .handle import AmpHandle, NoOpHandle from .lists import functional_overrides, torch_overrides, tensor_overrides from ._amp_state import _amp_state from .frontend import * import functools import itertools import torch _DECORATOR_HANDLE = None _USER_CAST_REGISTRY = ...
import torch from ._initialize import _initialize from ._amp_state import _amp_state, warn_or_err, maybe_print class Properties(object): """ This class has two purposes: to establish a set of default properties, and to route setting of these attributes through __setattr__ so that (in theory) they can ...
from .amp import init, half_function, float_function, promote_function,\ register_half_function, register_float_function, register_promote_function from .handle import scale_loss, disable_casts from .frontend import initialize from ._amp_state import master_params, _amp_state
import torch from ..multi_tensor_apply import multi_tensor_applier from ._amp_state import _amp_state, master_params, maybe_print from itertools import product def scale_check_overflow_python(model_grad, master_grad, scale, check_overflow=False): # Exception handling for 18.04 compatibility if check_overflow: ...
VERSION = (0, 1, 0) __version__ = '.'.join(map(str, VERSION))
import contextlib import warnings from .scaler import LossScaler, master_params from ._amp_state import maybe_print import numpy as np class OptimWrapper(object): def __init__(self, optimizer, amp_handle, num_loss): self._optimizer = optimizer self._amp_handle = amp_handle self._num_loss ...
# This is a "header object" that allows different amp modules to communicate. # I'm a C++ guy, not a python guy. I decided this approach because it seemed most C++-like. # But apparently it's ok: # http://effbot.org/pyfaq/how-do-i-share-global-variables-across-modules.htm import os import torch TORCH_MAJOR = int(to...
from . import compat import functools import itertools import torch def get_cuda_version(): return tuple(int(x) for x in torch.version.cuda.split('.')) def is_fp_tensor(x): if is_nested(x): # Fast-fail version of all(is_fp_tensor) for y in x: if not is_fp_tensor(y): ...
from . import compat from . import utils from ._amp_state import _amp_state from . import rnn_compat import functools import torch def make_cast_wrapper(orig_fn, cast_fn, handle, try_caching=False): @functools.wraps(orig_fn) def wrapper(*args, **kwargs): if not handle.is_active(...
from . import utils, wrap import torch _VF = torch._C._VariableFunctions RNN_NAMES = ['rnn_relu', 'rnn_tanh', 'gru', 'lstm'] def _gen_VF_wrapper(name): def wrapper(*args, **kwargs): return getattr(_VF, name)(*args, **kwargs) return wrapper # Some python magic to generate an object that has the rnn ce...
import torch from .. import utils MODULE = torch FP16_FUNCS = [ # Low level functions wrapped by torch.nn layers. # The wrapper layers contain the weights which are then passed in as a parameter # to these functions. 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', 'conv_transpose2d'...
# TODO: think about the following two. They do weird things. # - torch.nn.utils.clip_grad (but it should always be fp32 anyway) # - torch.nn.utils.weight_norm # Notes: # F.instance_norm uses batch_norm internally. Which correctly handles # fp16 in/out with fp32 weights. So we shouldn't do anything for # either of...
from .. import compat from . import torch_overrides import importlib import torch # if compat.variable_is_tensor() and not compat.tensor_is_variable(): MODULE = torch.Tensor # else: # MODULE = torch.autograd.Variable FP16_FUNCS = [ '__matmul__', ] FP32_FUNCS = [ '__ipow__', '__pow__', '__rpow_...