Dataset columns:
  id        : int64  (values 0 – 190k)
  prompt    : string (lengths 21 – 13.4M)
  docstring : string (lengths 1 – 12k)
11,589
import importlib.util
import json
import os
import shutil
import sys
import warnings
from collections import OrderedDict
from functools import lru_cache, wraps
from itertools import chain
from types import ModuleType
from typing import Any

from packaging import version
from transformers.utils.versions import importlib_metadata

from . import logging

_soundfile_available = importlib.util.find_spec("soundfile") is not None


def is_soundfile_available():
    return _soundfile_available
null
11,590
import importlib.util
import json
import os
import shutil
import sys
import warnings
from collections import OrderedDict
from functools import lru_cache, wraps
from itertools import chain
from types import ModuleType
from typing import Any

from packaging import version
from transformers.utils.versions import importlib_metadata

from . import logging

_timm_available = importlib.util.find_spec("timm") is not None


def is_timm_available():
    return _timm_available
null
11,591
import importlib.util
import json
import os
import shutil
import sys
import warnings
from collections import OrderedDict
from functools import lru_cache, wraps
from itertools import chain
from types import ModuleType
from typing import Any

from packaging import version
from transformers.utils.versions import importlib_metadata

from . import logging

_torchaudio_available = importlib.util.find_spec("torchaudio") is not None


def is_torchaudio_available():
    return _torchaudio_available
null
11,592
import importlib.util
import json
import os
import shutil
import sys
import warnings
from collections import OrderedDict
from functools import lru_cache, wraps
from itertools import chain
from types import ModuleType
from typing import Any

from packaging import version
from transformers.utils.versions import importlib_metadata

from . import logging

_torchaudio_available = importlib.util.find_spec("torchaudio") is not None


def is_speech_available():
    # For now this depends on torchaudio but the exact dependency might evolve in the future.
    return _torchaudio_available
null
11,593
import importlib.util
import json
import os
import shutil
import sys
import warnings
from collections import OrderedDict
from functools import lru_cache, wraps
from itertools import chain
from types import ModuleType
from typing import Any

from packaging import version
from transformers.utils.versions import importlib_metadata

from . import logging

_phonemizer_available = importlib.util.find_spec("phonemizer") is not None


def is_phonemizer_available():
    return _phonemizer_available
null
11,594
import importlib.util
import json
import os
import shutil
import sys
import warnings
from collections import OrderedDict
from functools import lru_cache, wraps
from itertools import chain
from types import ModuleType
from typing import Any

from packaging import version
from transformers.utils.versions import importlib_metadata

from . import logging

if _torch_available:
    torch_version = version.parse(importlib_metadata.version("torch"))
    _torch_fx_available = (torch_version.major, torch_version.minor) >= (
        TORCH_FX_REQUIRED_VERSION.major,
        TORCH_FX_REQUIRED_VERSION.minor,
    )
    _torch_onnx_dict_inputs_support_available = torch_version >= TORCH_ONNX_DICT_INPUTS_MINIMUM_VERSION


def torch_only_method(fn):
    def wrapper(*args, **kwargs):
        if not _torch_available:
            raise ImportError(
                "You need to install pytorch to use this method or class, "
                "or activate it with environment variables USE_TORCH=1 and USE_TF=0."
            )
        else:
            return fn(*args, **kwargs)

    return wrapper
null
11,595
import importlib.util
import json
import os
import shutil
import sys
import warnings
from collections import OrderedDict
from functools import lru_cache, wraps
from itertools import chain
from types import ModuleType
from typing import Any

from packaging import version
from transformers.utils.versions import importlib_metadata

from . import logging

_is_ccl_available = (
    importlib.util.find_spec("torch_ccl") is not None
    or importlib.util.find_spec("oneccl_bindings_for_pytorch") is not None
)


def is_ccl_available():
    return _is_ccl_available
null
11,596
import importlib.util
import json
import os
import shutil
import sys
import warnings
from collections import OrderedDict
from functools import lru_cache, wraps
from itertools import chain
from types import ModuleType
from typing import Any

from packaging import version
from transformers.utils.versions import importlib_metadata

from . import logging


def is_sudachi_available():
    return importlib.util.find_spec("sudachipy") is not None
null
11,597
import importlib.util
import json
import os
import shutil
import sys
import warnings
from collections import OrderedDict
from functools import lru_cache, wraps
from itertools import chain
from types import ModuleType
from typing import Any

from packaging import version
from transformers.utils.versions import importlib_metadata

from . import logging


def is_jumanpp_available():
    return (importlib.util.find_spec("pyknp") is not None) and (shutil.which("jumanpp") is not None)
null
11,598
import importlib.util
import json
import os
import shutil
import sys
import warnings
from collections import OrderedDict
from functools import lru_cache, wraps
from itertools import chain
from types import ModuleType
from typing import Any

from packaging import version
from transformers.utils.versions import importlib_metadata

from . import logging


def is_torch_available():
    return _torch_available


def is_tf_available():
    return _tf_available


PYTORCH_IMPORT_ERROR_WITH_TF = """
{0} requires the PyTorch library but it was not found in your environment.
However, we were able to find a TensorFlow installation. TensorFlow classes begin with "TF", but are otherwise
identically named to our PyTorch classes. This means that the TF equivalent of the class you tried to import
would be "TF{0}". If you want to use TensorFlow, please use TF classes instead!

If you really do want to use PyTorch please go to https://pytorch.org/get-started/locally/ and follow the
instructions that match your environment.
"""

TF_IMPORT_ERROR_WITH_PYTORCH = """
{0} requires the TensorFlow library but it was not found in your environment.
However, we were able to find a PyTorch installation. PyTorch classes do not begin with "TF", but are otherwise
identically named to our TF classes. If you want to use PyTorch, please use those classes instead!

If you really do want to use TensorFlow, please follow the instructions on the installation page
https://www.tensorflow.org/install that match your environment.
"""

BACKENDS_MAPPING = OrderedDict(
    [
        ("bs4", (is_bs4_available, BS4_IMPORT_ERROR)),
        ("datasets", (is_datasets_available, DATASETS_IMPORT_ERROR)),
        ("detectron2", (is_detectron2_available, DETECTRON2_IMPORT_ERROR)),
        ("faiss", (is_faiss_available, FAISS_IMPORT_ERROR)),
        ("flax", (is_flax_available, FLAX_IMPORT_ERROR)),
        ("ftfy", (is_ftfy_available, FTFY_IMPORT_ERROR)),
        ("pandas", (is_pandas_available, PANDAS_IMPORT_ERROR)),
        ("phonemizer", (is_phonemizer_available, PHONEMIZER_IMPORT_ERROR)),
        ("protobuf", (is_protobuf_available, PROTOBUF_IMPORT_ERROR)),
        ("pyctcdecode", (is_pyctcdecode_available, PYCTCDECODE_IMPORT_ERROR)),
        ("pytesseract", (is_pytesseract_available, PYTESSERACT_IMPORT_ERROR)),
        ("sacremoses", (is_sacremoses_available, SACREMOSES_IMPORT_ERROR)),
        ("scatter", (is_scatter_available, SCATTER_IMPORT_ERROR)),
        ("pytorch_quantization", (is_pytorch_quantization_available, PYTORCH_QUANTIZATION_IMPORT_ERROR)),
        ("sentencepiece", (is_sentencepiece_available, SENTENCEPIECE_IMPORT_ERROR)),
        ("sklearn", (is_sklearn_available, SKLEARN_IMPORT_ERROR)),
        ("speech", (is_speech_available, SPEECH_IMPORT_ERROR)),
        ("tensorflow_probability", (is_tensorflow_probability_available, TENSORFLOW_PROBABILITY_IMPORT_ERROR)),
        ("tf", (is_tf_available, TENSORFLOW_IMPORT_ERROR)),
        ("tensorflow_text", (is_tensorflow_text_available, TENSORFLOW_TEXT_IMPORT_ERROR)),
        ("timm", (is_timm_available, TIMM_IMPORT_ERROR)),
        ("tokenizers", (is_tokenizers_available, TOKENIZERS_IMPORT_ERROR)),
        ("torch", (is_torch_available, PYTORCH_IMPORT_ERROR)),
        ("vision", (is_vision_available, VISION_IMPORT_ERROR)),
        ("scipy", (is_scipy_available, SCIPY_IMPORT_ERROR)),
        ("accelerate", (is_accelerate_available, ACCELERATE_IMPORT_ERROR)),
        ("oneccl_bind_pt", (is_ccl_available, CCL_IMPORT_ERROR)),
    ]
)


def requires_backends(obj, backends):
    if not isinstance(backends, (list, tuple)):
        backends = [backends]

    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__

    # Raise an error for users who might not realize that classes without "TF" are torch-only
    if "torch" in backends and "tf" not in backends and not is_torch_available() and is_tf_available():
        raise ImportError(PYTORCH_IMPORT_ERROR_WITH_TF.format(name))

    # Raise the inverse error for PyTorch users trying to load TF classes
    if "tf" in backends and "torch" not in backends and is_torch_available() and not is_tf_available():
        raise ImportError(TF_IMPORT_ERROR_WITH_PYTORCH.format(name))

    checks = (BACKENDS_MAPPING[backend] for backend in backends)
    failed = [msg.format(name) for available, msg in checks if not available()]
    if failed:
        raise ImportError("".join(failed))
null
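The `requires_backends` row above is easy to exercise in isolation. Below is a minimal runnable sketch of the same gating pattern, with a hypothetical two-entry mapping (`_SKETCH_MAPPING`, `sketch_requires_backends`, `needs_both` are illustrative names, not transformers API):

import importlib.util
from collections import OrderedDict

# Hypothetical stand-in for BACKENDS_MAPPING: backend name -> (availability check, error template).
_SKETCH_MAPPING = OrderedDict(
    [
        ("numpy", (lambda: importlib.util.find_spec("numpy") is not None, "{0} requires numpy.\n")),
        ("missing_pkg", (lambda: False, "{0} requires missing_pkg.\n")),
    ]
)


def sketch_requires_backends(obj, backends):
    # Same shape as requires_backends above, minus the torch/TF cross-hint errors.
    if not isinstance(backends, (list, tuple)):
        backends = [backends]
    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
    checks = (_SKETCH_MAPPING[backend] for backend in backends)
    failed = [msg.format(name) for available, msg in checks if not available()]
    if failed:
        raise ImportError("".join(failed))


def needs_both():
    sketch_requires_backends(needs_both, ["numpy", "missing_pkg"])


try:
    needs_both()
except ImportError as e:
    print(e)  # "needs_both requires missing_pkg." -- only the failing backend is reported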
11,599
import importlib.util
import json
import os
import shutil
import sys
import warnings
from collections import OrderedDict
from functools import lru_cache, wraps
from itertools import chain
from types import ModuleType
from typing import Any

from packaging import version
from transformers.utils.versions import importlib_metadata

from . import logging


def is_torch_available():
    return _torch_available


def torch_required(func):
    # Chose a different decorator name than in tests so it's clear they are not the same.
    @wraps(func)
    def wrapper(*args, **kwargs):
        if is_torch_available():
            return func(*args, **kwargs)
        else:
            raise ImportError(f"Method `{func.__name__}` requires PyTorch.")

    return wrapper
null
11,600
import importlib.util
import json
import os
import shutil
import sys
import warnings
from collections import OrderedDict
from functools import lru_cache, wraps
from itertools import chain
from types import ModuleType
from typing import Any

from packaging import version
from transformers.utils.versions import importlib_metadata

from . import logging


def is_tf_available():
    return _tf_available


def tf_required(func):
    # Chose a different decorator name than in tests so it's clear they are not the same.
    @wraps(func)
    def wrapper(*args, **kwargs):
        if is_tf_available():
            return func(*args, **kwargs)
        else:
            raise ImportError(f"Method `{func.__name__}` requires TF.")

    return wrapper
null
11,601
from ..utils import DummyObject, requires_backends


def convert_slow_tokenizer(*args, **kwargs):
    requires_backends(convert_slow_tokenizer, ["sentencepiece", "tokenizers"])
null
11,602
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple

import numpy as np

from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy

if is_tf_available():
    import tensorflow as tf

if is_flax_available():
    import jax.numpy as jnp


def is_torch_available():
    return _torch_available


def is_tf_available():
    return _tf_available


def is_flax_available():
    return _flax_available


def is_torch_fx_proxy(x):
    if is_torch_fx_available():
        import torch.fx

        return isinstance(x, torch.fx.Proxy)
    return False


try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False

try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )

The provided code snippet includes necessary dependencies for implementing the `is_tensor` function.

Write a Python function `def is_tensor(x)` to solve the following problem:
Tests if `x` is a `torch.Tensor`, `tf.Tensor`, `jaxlib.xla_extension.DeviceArray` or `np.ndarray`.

Here is the function:

def is_tensor(x):
    """
    Tests if `x` is a `torch.Tensor`, `tf.Tensor`, `jaxlib.xla_extension.DeviceArray` or `np.ndarray`.
    """
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)
Tests if `x` is a `torch.Tensor`, `tf.Tensor`, `jaxlib.xla_extension.DeviceArray` or `np.ndarray`.
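A short usage sketch for `is_tensor`, assuming only NumPy is installed and that the function is importable from transformers.utils.generic, the module these rows appear to be extracted from:

import numpy as np

from transformers.utils.generic import is_tensor  # assumed import path

print(is_tensor(np.zeros((2, 3))))  # True: a plain np.ndarray counts as a tensor
print(is_tensor([1, 2, 3]))  # False: Python lists do not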
11,603
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple

import numpy as np

from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_available():
    return _torch_available

The provided code snippet includes necessary dependencies for implementing the `is_torch_device` function.

Write a Python function `def is_torch_device(x)` to solve the following problem:
Tests if `x` is a torch device or not. Safe to call even if torch is not installed.

Here is the function:

def is_torch_device(x):
    """
    Tests if `x` is a torch device or not. Safe to call even if torch is not installed.
    """
    return False if not is_torch_available() else _is_torch_device(x)
Tests if `x` is a torch device or not. Safe to call even if torch is not installed.
11,604
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple

import numpy as np

from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy


def is_torch_tensor(x):
    """
    Tests if `x` is a torch tensor or not. Safe to call even if torch is not installed.
    """
    return False if not is_torch_available() else _is_torch(x)


def is_tf_tensor(x):
    """
    Tests if `x` is a tensorflow tensor or not. Safe to call even if tensorflow is not installed.
    """
    return False if not is_tf_available() else _is_tensorflow(x)


def is_jax_tensor(x):
    """
    Tests if `x` is a Jax tensor or not. Safe to call even if jax is not installed.
    """
    return False if not is_flax_available() else _is_jax(x)

The provided code snippet includes necessary dependencies for implementing the `to_py_obj` function.

Write a Python function `def to_py_obj(obj)` to solve the following problem:
Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list.

Here is the function:

def to_py_obj(obj):
    """
    Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list.
    """
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):
        # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj
Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list.
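A usage sketch for `to_py_obj`, again assuming the transformers.utils.generic import path and only NumPy installed; note how nested containers are converted recursively:

import numpy as np

from transformers.utils.generic import to_py_obj  # assumed import path

batch = {"input_ids": np.array([[101, 2023], [101, 4937]]), "count": np.int64(2)}
print(to_py_obj(batch))
# {'input_ids': [[101, 2023], [101, 4937]], 'count': 2} -- everything is now plain Python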
11,605
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple

import numpy as np

from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy


def is_torch_tensor(x):
    """
    Tests if `x` is a torch tensor or not. Safe to call even if torch is not installed.
    """
    return False if not is_torch_available() else _is_torch(x)


def is_tf_tensor(x):
    """
    Tests if `x` is a tensorflow tensor or not. Safe to call even if tensorflow is not installed.
    """
    return False if not is_tf_available() else _is_tensorflow(x)


def is_jax_tensor(x):
    """
    Tests if `x` is a Jax tensor or not. Safe to call even if jax is not installed.
    """
    return False if not is_flax_available() else _is_jax(x)

The provided code snippet includes necessary dependencies for implementing the `to_numpy` function.

Write a Python function `def to_numpy(obj)` to solve the following problem:
Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array.

Here is the function:

def to_numpy(obj):
    """
    Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array.
    """
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array.
11,606
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple

import numpy as np

from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy

The provided code snippet includes necessary dependencies for implementing the `find_labels` function.

Write a Python function `def find_labels(model_class)` to solve the following problem:
Find the labels used by a given model.

Args:
    model_class (`type`): The class of the model.

Here is the function:

def find_labels(model_class):
    """
    Find the labels used by a given model.

    Args:
        model_class (`type`): The class of the model.
    """
    model_name = model_class.__name__
    if model_name.startswith("TF"):
        signature = inspect.signature(model_class.call)
    elif model_name.startswith("Flax"):
        signature = inspect.signature(model_class.__call__)
    else:
        signature = inspect.signature(model_class.forward)

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
Find the labels used by a given model. Args: model_class (`type`): The class of the model.
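To make the selection logic concrete, here is a self-contained sketch of the PyTorch branch of `find_labels`, using a hypothetical toy class (`ToyForQuestionAnswering` and `sketch_find_labels` are illustrative names):

import inspect


class ToyForQuestionAnswering:
    """Hypothetical stand-in for a PyTorch QA model (no "TF"/"Flax" prefix)."""

    def forward(self, input_ids=None, start_positions=None, end_positions=None):
        pass


def sketch_find_labels(model_class):
    # Same selection logic as find_labels above, restricted to the PyTorch branch.
    signature = inspect.signature(model_class.forward)
    if "QuestionAnswering" in model_class.__name__:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    return [p for p in signature.parameters if "label" in p]


print(sketch_find_labels(ToyForQuestionAnswering))  # ['start_positions', 'end_positions']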
11,607
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple

import numpy as np

from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy

The provided code snippet includes necessary dependencies for implementing the `flatten_dict` function.

Write a Python function `def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = ".")` to solve the following problem:
Flatten a nested dict into a single level dict.

Here is the function:

def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single level dict."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
Flatten a nested dict into a single level dict.
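Since `flatten_dict` depends only on the standard library, it can be run verbatim; a usage example with the function copied inline from the row above:

from collections.abc import MutableMapping


def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single level dict."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))


nested = {"optimizer": {"lr": 1e-4, "betas": {"b1": 0.9}}, "epochs": 3}
print(flatten_dict(nested))
# {'optimizer.lr': 0.0001, 'optimizer.betas.b1': 0.9, 'epochs': 3}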
11,608
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple

import numpy as np

from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy


@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
null
11,609
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple

import numpy as np

from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy


def is_numpy_array(x):
    """
    Tests if `x` is a numpy array or not.
    """
    return _is_numpy(x)


def is_torch_tensor(x):
    """
    Tests if `x` is a torch tensor or not. Safe to call even if torch is not installed.
    """
    return False if not is_torch_available() else _is_torch(x)


def is_tf_tensor(x):
    """
    Tests if `x` is a tensorflow tensor or not. Safe to call even if tensorflow is not installed.
    """
    return False if not is_tf_available() else _is_tensorflow(x)


def is_jax_tensor(x):
    """
    Tests if `x` is a Jax tensor or not. Safe to call even if jax is not installed.
    """
    return False if not is_flax_available() else _is_jax(x)

The provided code snippet includes necessary dependencies for implementing the `transpose` function.

Write a Python function `def transpose(array, axes=None)` to solve the following problem:
Framework-agnostic version of `numpy.transpose` that will work on torch/TensorFlow/Jax tensors as well as NumPy arrays.

Here is the function:

def transpose(array, axes=None):
    """
    Framework-agnostic version of `numpy.transpose` that will work on torch/TensorFlow/Jax tensors as well as NumPy
    arrays.
    """
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")
Framework-agnostic version of `numpy.transpose` that will work on torch/TensorFlow/Jax tensors as well as NumPy arrays.
11,610
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple

import numpy as np

from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy


def is_numpy_array(x):
    """
    Tests if `x` is a numpy array or not.
    """
    return _is_numpy(x)


def is_torch_tensor(x):
    """
    Tests if `x` is a torch tensor or not. Safe to call even if torch is not installed.
    """
    return False if not is_torch_available() else _is_torch(x)


def is_tf_tensor(x):
    """
    Tests if `x` is a tensorflow tensor or not. Safe to call even if tensorflow is not installed.
    """
    return False if not is_tf_available() else _is_tensorflow(x)


def is_jax_tensor(x):
    """
    Tests if `x` is a Jax tensor or not. Safe to call even if jax is not installed.
    """
    return False if not is_flax_available() else _is_jax(x)

The provided code snippet includes necessary dependencies for implementing the `reshape` function.

Write a Python function `def reshape(array, newshape)` to solve the following problem:
Framework-agnostic version of `numpy.reshape` that will work on torch/TensorFlow/Jax tensors as well as NumPy arrays.

Here is the function:

def reshape(array, newshape):
    """
    Framework-agnostic version of `numpy.reshape` that will work on torch/TensorFlow/Jax tensors as well as NumPy
    arrays.
    """
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")
Framework-agnostic version of `numpy.reshape` that will work on torch/TensorFlow/Jax tensors as well as NumPy arrays.
11,611
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple

import numpy as np

from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy


def is_numpy_array(x):
    """
    Tests if `x` is a numpy array or not.
    """
    return _is_numpy(x)


def is_torch_tensor(x):
    """
    Tests if `x` is a torch tensor or not. Safe to call even if torch is not installed.
    """
    return False if not is_torch_available() else _is_torch(x)


def is_tf_tensor(x):
    """
    Tests if `x` is a tensorflow tensor or not. Safe to call even if tensorflow is not installed.
    """
    return False if not is_tf_available() else _is_tensorflow(x)


def is_jax_tensor(x):
    """
    Tests if `x` is a Jax tensor or not. Safe to call even if jax is not installed.
    """
    return False if not is_flax_available() else _is_jax(x)

The provided code snippet includes necessary dependencies for implementing the `squeeze` function.

Write a Python function `def squeeze(array, axis=None)` to solve the following problem:
Framework-agnostic version of `numpy.squeeze` that will work on torch/TensorFlow/Jax tensors as well as NumPy arrays.

Here is the function:

def squeeze(array, axis=None):
    """
    Framework-agnostic version of `numpy.squeeze` that will work on torch/TensorFlow/Jax tensors as well as NumPy
    arrays.
    """
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")
Framework-agnostic version of `numpy.squeeze` that will work on torch/TensorFlow/Jax tensors as well as NumPy arrays.
11,612
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple

import numpy as np

from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy


def is_numpy_array(x):
    """
    Tests if `x` is a numpy array or not.
    """
    return _is_numpy(x)


def is_torch_tensor(x):
    """
    Tests if `x` is a torch tensor or not. Safe to call even if torch is not installed.
    """
    return False if not is_torch_available() else _is_torch(x)


def is_tf_tensor(x):
    """
    Tests if `x` is a tensorflow tensor or not. Safe to call even if tensorflow is not installed.
    """
    return False if not is_tf_available() else _is_tensorflow(x)


def is_jax_tensor(x):
    """
    Tests if `x` is a Jax tensor or not. Safe to call even if jax is not installed.
    """
    return False if not is_flax_available() else _is_jax(x)

The provided code snippet includes necessary dependencies for implementing the `expand_dims` function.

Write a Python function `def expand_dims(array, axis)` to solve the following problem:
Framework-agnostic version of `numpy.expand_dims` that will work on torch/TensorFlow/Jax tensors as well as NumPy arrays.

Here is the function:

def expand_dims(array, axis):
    """
    Framework-agnostic version of `numpy.expand_dims` that will work on torch/TensorFlow/Jax tensors as well as NumPy
    arrays.
    """
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")
Framework-agnostic version of `numpy.expand_dims` that will work on torch/TensorFlow/Jax tensors as well as NumPy arrays.
11,613
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple

import numpy as np

from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy


def is_numpy_array(x):
    """
    Tests if `x` is a numpy array or not.
    """
    return _is_numpy(x)


def is_torch_tensor(x):
    """
    Tests if `x` is a torch tensor or not. Safe to call even if torch is not installed.
    """
    return False if not is_torch_available() else _is_torch(x)


def is_tf_tensor(x):
    """
    Tests if `x` is a tensorflow tensor or not. Safe to call even if tensorflow is not installed.
    """
    return False if not is_tf_available() else _is_tensorflow(x)


def is_jax_tensor(x):
    """
    Tests if `x` is a Jax tensor or not. Safe to call even if jax is not installed.
    """
    return False if not is_flax_available() else _is_jax(x)

The provided code snippet includes necessary dependencies for implementing the `tensor_size` function.

Write a Python function `def tensor_size(array)` to solve the following problem:
Framework-agnostic version of `numpy.size` that will work on torch/TensorFlow/Jax tensors as well as NumPy arrays.

Here is the function:

def tensor_size(array):
    """
    Framework-agnostic version of `numpy.size` that will work on torch/TensorFlow/Jax tensors as well as NumPy arrays.
    """
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
Framework-agnostic version of `numpy.size` that will work on torch/TensorFlow/Jax tensors as well as NumPy arrays.
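The five framework-agnostic helpers above share the same dispatch pattern; a quick NumPy-only usage sketch, assuming they are importable from transformers.utils.generic as the relative imports suggest:

import numpy as np

from transformers.utils.generic import expand_dims, reshape, squeeze, tensor_size, transpose  # assumed path

x = np.arange(6).reshape(2, 3)
print(transpose(x).shape)  # (3, 2): dispatches to np.transpose for ndarrays
print(reshape(x, (3, 2)).shape)  # (3, 2)
print(squeeze(expand_dims(x, 0), axis=0).shape)  # (2, 3): round trip through a new leading axis
print(tensor_size(x))  # 6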
11,614
import logging
import os
import sys
import threading
from logging import CRITICAL
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import NOTSET
from logging import WARN
from logging import WARNING
from typing import Optional

from tqdm import auto as tqdm_lib

import huggingface_hub.utils as hf_hub_utils

_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


logging.Logger.warning_advice = warning_advice

import logging


def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None
null
11,615
import logging
import os
import sys
import threading
from logging import CRITICAL
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import NOTSET
from logging import WARN
from logging import WARNING
from typing import Optional

from tqdm import auto as tqdm_lib

import huggingface_hub.utils as hf_hub_utils

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}


def get_log_levels_dict():
    return log_levels
null
11,616
import logging
import os
import sys
import threading
from logging import CRITICAL
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import NOTSET
from logging import WARN
from logging import WARNING
from typing import Optional

from tqdm import auto as tqdm_lib

import huggingface_hub.utils as hf_hub_utils


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


logging.Logger.warning_advice = warning_advice

import logging

The provided code snippet includes necessary dependencies for implementing the `get_logger` function.

Write a Python function `def get_logger(name: Optional[str] = None) -> logging.Logger` to solve the following problem:
Return a logger with the specified name. This function is not supposed to be directly accessed unless you are writing a custom transformers module.

Here is the function:

def get_logger(name: Optional[str] = None) -> logging.Logger:
    """
    Return a logger with the specified name.

    This function is not supposed to be directly accessed unless you are writing a custom transformers module.
    """
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)
Return a logger with the specified name. This function is not supposed to be directly accessed unless you are writing a custom transformers module.
11,617
import logging
import os
import sys
import threading
from logging import CRITICAL
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import NOTSET
from logging import WARN
from logging import WARNING
from typing import Optional

from tqdm import auto as tqdm_lib

import huggingface_hub.utils as hf_hub_utils


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False

The provided code snippet includes necessary dependencies for implementing the `get_verbosity` function.

Write a Python function `def get_verbosity() -> int` to solve the following problem:
Return the current level for the 🤗 Transformers's root logger as an int.

Returns:
    `int`: The logging level.

<Tip>

🤗 Transformers has following logging levels:

- 50: `transformers.logging.CRITICAL` or `transformers.logging.FATAL`
- 40: `transformers.logging.ERROR`
- 30: `transformers.logging.WARNING` or `transformers.logging.WARN`
- 20: `transformers.logging.INFO`
- 10: `transformers.logging.DEBUG`

</Tip>

Here is the function:

def get_verbosity() -> int:
    """
    Return the current level for the 🤗 Transformers's root logger as an int.

    Returns:
        `int`: The logging level.

    <Tip>

    🤗 Transformers has following logging levels:

    - 50: `transformers.logging.CRITICAL` or `transformers.logging.FATAL`
    - 40: `transformers.logging.ERROR`
    - 30: `transformers.logging.WARNING` or `transformers.logging.WARN`
    - 20: `transformers.logging.INFO`
    - 10: `transformers.logging.DEBUG`

    </Tip>"""
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()
Return the current level for the 🤗 Transformers's root logger as an int. Returns: `int`: The logging level. <Tip> 🤗 Transformers has following logging levels: - 50: `transformers.logging.CRITICAL` or `transformers.logging.FATAL` - 40: `transformers.logging.ERROR` - 30: `transformers.logging.WARNING` or `transformers.logging.WARN` - 20: `transformers.logging.INFO` - 10: `transformers.logging.DEBUG` </Tip>
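A short usage sketch combining the `get_logger`, `get_verbosity`, and `set_verbosity_*` rows through the public transformers logging module:

from transformers.utils import logging

logging.set_verbosity_info()
print(logging.get_verbosity())  # 20, i.e. logging.INFO

logger = logging.get_logger("transformers")
logger.info("visible at INFO verbosity")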
11,618
import logging
import os
import sys
import threading
from logging import CRITICAL
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import NOTSET
from logging import WARN
from logging import WARNING
from typing import Optional

from tqdm import auto as tqdm_lib

import huggingface_hub.utils as hf_hub_utils


def set_verbosity(verbosity: int) -> None:
    """
    Set the verbosity level for the 🤗 Transformers's root logger.

    Args:
        verbosity (`int`):
            Logging level, e.g., one of:

            - `transformers.logging.CRITICAL` or `transformers.logging.FATAL`
            - `transformers.logging.ERROR`
            - `transformers.logging.WARNING` or `transformers.logging.WARN`
            - `transformers.logging.INFO`
            - `transformers.logging.DEBUG`
    """
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)

The provided code snippet includes necessary dependencies for implementing the `set_verbosity_info` function.

Write a Python function `def set_verbosity_info()` to solve the following problem:
Set the verbosity to the `INFO` level.

Here is the function:

def set_verbosity_info():
    """Set the verbosity to the `INFO` level."""
    return set_verbosity(INFO)
Set the verbosity to the `INFO` level.
11,619
import logging
import os
import sys
import threading
from logging import CRITICAL
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import NOTSET
from logging import WARN
from logging import WARNING
from typing import Optional

from tqdm import auto as tqdm_lib

import huggingface_hub.utils as hf_hub_utils


def set_verbosity(verbosity: int) -> None:
    """
    Set the verbosity level for the 🤗 Transformers's root logger.

    Args:
        verbosity (`int`):
            Logging level, e.g., one of:

            - `transformers.logging.CRITICAL` or `transformers.logging.FATAL`
            - `transformers.logging.ERROR`
            - `transformers.logging.WARNING` or `transformers.logging.WARN`
            - `transformers.logging.INFO`
            - `transformers.logging.DEBUG`
    """
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)

The provided code snippet includes necessary dependencies for implementing the `set_verbosity_warning` function.

Write a Python function `def set_verbosity_warning()` to solve the following problem:
Set the verbosity to the `WARNING` level.

Here is the function:

def set_verbosity_warning():
    """Set the verbosity to the `WARNING` level."""
    return set_verbosity(WARNING)
Set the verbosity to the `WARNING` level.
11,620
import logging
import os
import sys
import threading
from logging import CRITICAL
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import NOTSET
from logging import WARN
from logging import WARNING
from typing import Optional

from tqdm import auto as tqdm_lib

import huggingface_hub.utils as hf_hub_utils


def set_verbosity(verbosity: int) -> None:
    """
    Set the verbosity level for the 🤗 Transformers's root logger.

    Args:
        verbosity (`int`):
            Logging level, e.g., one of:

            - `transformers.logging.CRITICAL` or `transformers.logging.FATAL`
            - `transformers.logging.ERROR`
            - `transformers.logging.WARNING` or `transformers.logging.WARN`
            - `transformers.logging.INFO`
            - `transformers.logging.DEBUG`
    """
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)

The provided code snippet includes necessary dependencies for implementing the `set_verbosity_debug` function.

Write a Python function `def set_verbosity_debug()` to solve the following problem:
Set the verbosity to the `DEBUG` level.

Here is the function:

def set_verbosity_debug():
    """Set the verbosity to the `DEBUG` level."""
    return set_verbosity(DEBUG)
Set the verbosity to the `DEBUG` level.
11,621
import logging
import os
import sys
import threading
from logging import CRITICAL
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import NOTSET
from logging import WARN
from logging import WARNING
from typing import Optional

from tqdm import auto as tqdm_lib

import huggingface_hub.utils as hf_hub_utils


def set_verbosity(verbosity: int) -> None:
    """
    Set the verbosity level for the 🤗 Transformers's root logger.

    Args:
        verbosity (`int`):
            Logging level, e.g., one of:

            - `transformers.logging.CRITICAL` or `transformers.logging.FATAL`
            - `transformers.logging.ERROR`
            - `transformers.logging.WARNING` or `transformers.logging.WARN`
            - `transformers.logging.INFO`
            - `transformers.logging.DEBUG`
    """
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)

The provided code snippet includes necessary dependencies for implementing the `set_verbosity_error` function.

Write a Python function `def set_verbosity_error()` to solve the following problem:
Set the verbosity to the `ERROR` level.

Here is the function:

def set_verbosity_error():
    """Set the verbosity to the `ERROR` level."""
    return set_verbosity(ERROR)
Set the verbosity to the `ERROR` level.
11,622
import logging
import os
import sys
import threading
from logging import CRITICAL
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import NOTSET
from logging import WARN
from logging import WARNING
from typing import Optional

from tqdm import auto as tqdm_lib

import huggingface_hub.utils as hf_hub_utils

_default_handler: Optional[logging.Handler] = None


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False

The provided code snippet includes necessary dependencies for implementing the `disable_default_handler` function.

Write a Python function `def disable_default_handler() -> None` to solve the following problem:
Disable the default handler of the HuggingFace Transformers's root logger.

Here is the function:

def disable_default_handler() -> None:
    """Disable the default handler of the HuggingFace Transformers's root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)
Disable the default handler of the HuggingFace Transformers's root logger.
11,623
import logging
import os
import sys
import threading
from logging import CRITICAL
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import NOTSET
from logging import WARN
from logging import WARNING
from typing import Optional

from tqdm import auto as tqdm_lib

import huggingface_hub.utils as hf_hub_utils

_default_handler: Optional[logging.Handler] = None


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False

The provided code snippet includes necessary dependencies for implementing the `enable_default_handler` function.

Write a Python function `def enable_default_handler() -> None` to solve the following problem:
Enable the default handler of the HuggingFace Transformers's root logger.

Here is the function:

def enable_default_handler() -> None:
    """Enable the default handler of the HuggingFace Transformers's root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)
Enable the default handler of the HuggingFace Transformers's root logger.
11,624
import logging
import os
import sys
import threading
from logging import CRITICAL
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import NOTSET
from logging import WARN
from logging import WARNING
from typing import Optional

from tqdm import auto as tqdm_lib

import huggingface_hub.utils as hf_hub_utils


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


logging.Logger.warning_advice = warning_advice

import logging

The provided code snippet includes necessary dependencies for implementing the `add_handler` function.

Write a Python function `def add_handler(handler: logging.Handler) -> None` to solve the following problem:
adds a handler to the HuggingFace Transformers's root logger.

Here is the function:

def add_handler(handler: logging.Handler) -> None:
    """adds a handler to the HuggingFace Transformers's root logger."""
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)
adds a handler to the HuggingFace Transformers's root logger.
11,625
import logging
import os
import sys
import threading
from logging import CRITICAL
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import NOTSET
from logging import WARN
from logging import WARNING
from typing import Optional

from tqdm import auto as tqdm_lib

import huggingface_hub.utils as hf_hub_utils


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


logging.Logger.warning_advice = warning_advice

import logging

The provided code snippet includes necessary dependencies for implementing the `remove_handler` function.

Write a Python function `def remove_handler(handler: logging.Handler) -> None` to solve the following problem:
removes given handler from the HuggingFace Transformers's root logger.

Here is the function:

def remove_handler(handler: logging.Handler) -> None:
    """removes given handler from the HuggingFace Transformers's root logger."""
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)
removes given handler from the HuggingFace Transformers's root logger.
11,626
import logging
import os
import sys
import threading
from logging import CRITICAL
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import NOTSET
from logging import WARN
from logging import WARNING
from typing import Optional

from tqdm import auto as tqdm_lib

import huggingface_hub.utils as hf_hub_utils


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False

The provided code snippet includes necessary dependencies for implementing the `disable_propagation` function.

Write a Python function `def disable_propagation() -> None` to solve the following problem:
Disable propagation of the library log outputs. Note that log propagation is disabled by default.

Here is the function:

def disable_propagation() -> None:
    """
    Disable propagation of the library log outputs. Note that log propagation is disabled by default.
    """
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False
Disable propagation of the library log outputs. Note that log propagation is disabled by default.
11,627
import logging
import os
import sys
import threading
from logging import CRITICAL
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import NOTSET
from logging import WARN
from logging import WARNING
from typing import Optional

from tqdm import auto as tqdm_lib

import huggingface_hub.utils as hf_hub_utils


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False

The provided code snippet includes necessary dependencies for implementing the `enable_propagation` function.

Write a Python function `def enable_propagation() -> None` to solve the following problem:
Enable propagation of the library log outputs. Please disable the HuggingFace Transformers's default handler to prevent double logging if the root logger has been configured.

Here is the function:

def enable_propagation() -> None:
    """
    Enable propagation of the library log outputs. Please disable the HuggingFace Transformers's default handler to
    prevent double logging if the root logger has been configured.
    """
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True
Enable propagation of the library log outputs. Please disable the HuggingFace Transformers's default handler to prevent double logging if the root logger has been configured.
11,628
import logging
import os
import sys
import threading
from logging import CRITICAL
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import NOTSET
from logging import WARN
from logging import WARNING
from typing import Optional

from tqdm import auto as tqdm_lib

import huggingface_hub.utils as hf_hub_utils


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


logging.Logger.warning_advice = warning_advice

import logging

The provided code snippet includes necessary dependencies for implementing the `enable_explicit_format` function.

Write a Python function `def enable_explicit_format() -> None` to solve the following problem:
Enable explicit formatting for every HuggingFace Transformers's logger. The explicit formatter is as follows:

    [LEVELNAME|FILENAME|LINE NUMBER] TIME >> MESSAGE

All handlers currently bound to the root logger are affected by this method.

Here is the function:

def enable_explicit_format() -> None:
    """
    Enable explicit formatting for every HuggingFace Transformers's logger. The explicit formatter is as follows:
    ```
        [LEVELNAME|FILENAME|LINE NUMBER] TIME >> MESSAGE
    ```
    All handlers currently bound to the root logger are affected by this method.
    """
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)
Enable explicit formatting for every HuggingFace Transformers's logger. The explicit formatter is as follows: ``` [LEVELNAME|FILENAME|LINE NUMBER] TIME >> MESSAGE ``` All handlers currently bound to the root logger are affected by this method.
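A usage sketch for the two formatting rows, via the public transformers logging module; the printed format shown in the comment is taken from the formatter string in the row above:

from transformers.utils import logging

logging.set_verbosity_info()
logging.enable_explicit_format()

logger = logging.get_logger("transformers")
logger.info("hello")  # [INFO|<file>:<line>] <timestamp> >> hello

logging.reset_format()  # handlers revert to the unformatted default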
11,629
import logging
import os
import sys
import threading
from logging import CRITICAL
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import NOTSET
from logging import WARN
from logging import WARNING
from typing import Optional

from tqdm import auto as tqdm_lib

import huggingface_hub.utils as hf_hub_utils


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())

The provided code snippet includes necessary dependencies for implementing the `reset_format` function.

Write a Python function `def reset_format() -> None` to solve the following problem:
Resets the formatting for HuggingFace Transformers's loggers. All handlers currently bound to the root logger are affected by this method.

Here is the function:

def reset_format() -> None:
    """
    Resets the formatting for HuggingFace Transformers's loggers.

    All handlers currently bound to the root logger are affected by this method.
    """
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)
Resets the formatting for HuggingFace Transformers's loggers. All handlers currently bound to the root logger are affected by this method.
11,630
import logging
import os
import sys
import threading
from logging import CRITICAL
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import NOTSET
from logging import WARN
from logging import WARNING
from typing import Optional

from tqdm import auto as tqdm_lib

import huggingface_hub.utils as hf_hub_utils

The provided code snippet includes necessary dependencies for implementing the `warning_advice` function.

Write a Python function `def warning_advice(self, *args, **kwargs)` to solve the following problem:
This method is identical to `logger.warning()`, but if env var TRANSFORMERS_NO_ADVISORY_WARNINGS=1 is set, this warning will not be printed

Here is the function:

def warning_advice(self, *args, **kwargs):
    """
    This method is identical to `logger.warning()`, but if env var TRANSFORMERS_NO_ADVISORY_WARNINGS=1 is set, this
    warning will not be printed
    """
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)
This method is identical to `logger.warning()`, but if env var TRANSFORMERS_NO_ADVISORY_WARNINGS=1 is set, this warning will not be printed
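Since the earlier rows patch this function onto `logging.Logger` (`logging.Logger.warning_advice = warning_advice`), it is available on any logger returned by `get_logger`. A usage sketch; note the env var is read at call time, so setting it mid-process takes effect immediately:

import os

from transformers.utils import logging

logger = logging.get_logger("transformers")
logger.warning_advice("shown by default")

os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "1"
logger.warning_advice("suppressed: the env var check returns before self.warning runs")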
11,631
import logging
import os
import sys
import threading
from logging import CRITICAL
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import NOTSET
from logging import WARN
from logging import WARNING
from typing import Optional

from tqdm import auto as tqdm_lib

import huggingface_hub.utils as hf_hub_utils

_tqdm_active = True

The provided code snippet includes necessary dependencies for implementing the `is_progress_bar_enabled` function.

Write a Python function `def is_progress_bar_enabled() -> bool` to solve the following problem:
Return a boolean indicating whether tqdm progress bars are enabled.

Here is the function:

def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)
Return a boolean indicating whether tqdm progress bars are enabled.
11,632
import logging
import os
import sys
import threading
from logging import CRITICAL
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import NOTSET
from logging import WARN
from logging import WARNING
from typing import Optional

from tqdm import auto as tqdm_lib

import huggingface_hub.utils as hf_hub_utils

_tqdm_active = True

The provided code snippet includes necessary dependencies for implementing the `enable_progress_bar` function.

Write a Python function `def enable_progress_bar()` to solve the following problem:
Enable tqdm progress bar.

Here is the function:

def enable_progress_bar():
    """Enable tqdm progress bar."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()
Enable tqdm progress bar.
11,633
import logging import os import sys import threading from logging import CRITICAL from logging import DEBUG from logging import ERROR from logging import FATAL from logging import INFO from logging import NOTSET from logging import WARN from logging import WARNING from typing import Optional from tqdm import auto as tqdm_lib import huggingface_hub.utils as hf_hub_utils _tqdm_active = True The provided code snippet includes necessary dependencies for implementing the `disable_progress_bar` function. Write a Python function `def disable_progress_bar()` to solve the following problem: Disable tqdm progress bar. Here is the function: def disable_progress_bar(): """Disable tqdm progress bar.""" global _tqdm_active _tqdm_active = False hf_hub_utils.disable_progress_bars()
Disable tqdm progress bar.
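A short sketch of toggling the two switches around a noisy operation; note that both also flip the `huggingface_hub` progress bars:

disable_progress_bar()
assert not is_progress_bar_enabled()
# ... run downloads or conversions without tqdm output ...
enable_progress_bar()
assert is_progress_bar_enabled()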
11,634
import functools import re import types def add_start_docstrings(*docstr): def docstring_decorator(fn): fn.__doc__ = "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "") return fn return docstring_decorator
null
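A quick sketch of the decorator's effect (`add_end_docstrings` further below appends symmetrically):

@add_start_docstrings("Shared intro. ", "Shared args. ")
def forward(x):
    """Function-specific details."""
    return x

print(forward.__doc__)
# Shared intro. Shared args. Function-specific details.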
11,635
import functools import re import types def add_start_docstrings_to_model_forward(*docstr): def docstring_decorator(fn): docstring = "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "") class_name = f"[`{fn.__qualname__.split('.')[0]}`]" intro = f" The {class_name} forward method, overrides the `__call__` special method." note = r""" <Tip> Although the recipe for forward pass needs to be defined within this function, one should call the [`Module`] instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them. </Tip> """ fn.__doc__ = intro + note + docstring return fn return docstring_decorator
null
11,636
import functools import re import types def add_end_docstrings(*docstr): def docstring_decorator(fn): fn.__doc__ = (fn.__doc__ if fn.__doc__ is not None else "") + "".join(docstr) return fn return docstring_decorator
null
11,637
import functools import re import types def _prepare_output_docstrings(output_type, config_class, min_indent=None): """ Prepares the return part of the docstring using `output_type`. """ output_docstring = output_type.__doc__ # Remove the head of the docstring to keep the list of args only lines = output_docstring.split("\n") i = 0 while i < len(lines) and re.search(r"^\s*(Args|Parameters):\s*$", lines[i]) is None: i += 1 if i < len(lines): params_docstring = "\n".join(lines[(i + 1) :]) params_docstring = _convert_output_args_doc(params_docstring) # Add the return introduction full_output_type = f"{output_type.__module__}.{output_type.__name__}" intro = TF_RETURN_INTRODUCTION if output_type.__name__.startswith("TF") else PT_RETURN_INTRODUCTION intro = intro.format(full_output_type=full_output_type, config_class=config_class) result = intro + params_docstring # Apply minimum indent if necessary if min_indent is not None: lines = result.split("\n") # Find the indent of the first nonempty line i = 0 while len(lines[i]) == 0: i += 1 indent = len(_get_indent(lines[i])) # If too small, add indentation to all nonempty lines if indent < min_indent: to_add = " " * (min_indent - indent) lines = [(f"{to_add}{line}" if len(line) > 0 else line) for line in lines] result = "\n".join(lines) return result PT_SAMPLE_DOCSTRINGS = { "SequenceClassification": PT_SEQUENCE_CLASSIFICATION_SAMPLE, "QuestionAnswering": PT_QUESTION_ANSWERING_SAMPLE, "TokenClassification": PT_TOKEN_CLASSIFICATION_SAMPLE, "MultipleChoice": PT_MULTIPLE_CHOICE_SAMPLE, "MaskedLM": PT_MASKED_LM_SAMPLE, "LMHead": PT_CAUSAL_LM_SAMPLE, "BaseModel": PT_BASE_MODEL_SAMPLE, "SpeechBaseModel": PT_SPEECH_BASE_MODEL_SAMPLE, "CTC": PT_SPEECH_CTC_SAMPLE, "AudioClassification": PT_SPEECH_SEQ_CLASS_SAMPLE, "AudioFrameClassification": PT_SPEECH_FRAME_CLASS_SAMPLE, "AudioXVector": PT_SPEECH_XVECTOR_SAMPLE, "VisionBaseModel": PT_VISION_BASE_MODEL_SAMPLE, "ImageClassification": PT_VISION_SEQ_CLASS_SAMPLE, } TF_SAMPLE_DOCSTRINGS = { "SequenceClassification": TF_SEQUENCE_CLASSIFICATION_SAMPLE, "QuestionAnswering": TF_QUESTION_ANSWERING_SAMPLE, "TokenClassification": TF_TOKEN_CLASSIFICATION_SAMPLE, "MultipleChoice": TF_MULTIPLE_CHOICE_SAMPLE, "MaskedLM": TF_MASKED_LM_SAMPLE, "LMHead": TF_CAUSAL_LM_SAMPLE, "BaseModel": TF_BASE_MODEL_SAMPLE, "SpeechBaseModel": TF_SPEECH_BASE_MODEL_SAMPLE, "CTC": TF_SPEECH_CTC_SAMPLE, "VisionBaseModel": TF_VISION_BASE_MODEL_SAMPLE, "ImageClassification": TF_VISION_SEQ_CLASS_SAMPLE, } FLAX_SAMPLE_DOCSTRINGS = { "SequenceClassification": FLAX_SEQUENCE_CLASSIFICATION_SAMPLE, "QuestionAnswering": FLAX_QUESTION_ANSWERING_SAMPLE, "TokenClassification": FLAX_TOKEN_CLASSIFICATION_SAMPLE, "MultipleChoice": FLAX_MULTIPLE_CHOICE_SAMPLE, "MaskedLM": FLAX_MASKED_LM_SAMPLE, "BaseModel": FLAX_BASE_MODEL_SAMPLE, "LMHead": FLAX_CAUSAL_LM_SAMPLE, } def add_code_sample_docstrings( *docstr, processor_class=None, checkpoint=None, output_type=None, config_class=None, mask="[MASK]", qa_target_start_index=14, qa_target_end_index=15, model_cls=None, modality=None, expected_output="", expected_loss="", ): def docstring_decorator(fn): # model_class defaults to function's class if not specified otherwise model_class = fn.__qualname__.split(".")[0] if model_cls is None else model_cls if model_class[:2] == "TF": sample_docstrings = TF_SAMPLE_DOCSTRINGS elif model_class[:4] == "Flax": sample_docstrings = FLAX_SAMPLE_DOCSTRINGS else: sample_docstrings = PT_SAMPLE_DOCSTRINGS # putting all kwargs for docstrings in a dict to be used # with the 
`.format(**doc_kwargs)`. Note that string might # be formatted with non-existing keys, which is fine. doc_kwargs = dict( model_class=model_class, processor_class=processor_class, checkpoint=checkpoint, mask=mask, qa_target_start_index=qa_target_start_index, qa_target_end_index=qa_target_end_index, expected_output=expected_output, expected_loss=expected_loss, ) if "SequenceClassification" in model_class and modality == "audio": code_sample = sample_docstrings["AudioClassification"] elif "SequenceClassification" in model_class: code_sample = sample_docstrings["SequenceClassification"] elif "QuestionAnswering" in model_class: code_sample = sample_docstrings["QuestionAnswering"] elif "TokenClassification" in model_class: code_sample = sample_docstrings["TokenClassification"] elif "MultipleChoice" in model_class: code_sample = sample_docstrings["MultipleChoice"] elif "MaskedLM" in model_class or model_class in ["FlaubertWithLMHeadModel", "XLMWithLMHeadModel"]: code_sample = sample_docstrings["MaskedLM"] elif "LMHead" in model_class or "CausalLM" in model_class: code_sample = sample_docstrings["LMHead"] elif "CTC" in model_class: code_sample = sample_docstrings["CTC"] elif "AudioFrameClassification" in model_class: code_sample = sample_docstrings["AudioFrameClassification"] elif "XVector" in model_class and modality == "audio": code_sample = sample_docstrings["AudioXVector"] elif "Model" in model_class and modality == "audio": code_sample = sample_docstrings["SpeechBaseModel"] elif "Model" in model_class and modality == "vision": code_sample = sample_docstrings["VisionBaseModel"] elif "Model" in model_class or "Encoder" in model_class: code_sample = sample_docstrings["BaseModel"] elif "ImageClassification" in model_class: code_sample = sample_docstrings["ImageClassification"] else: raise ValueError(f"Docstring can't be built for model {model_class}") func_doc = (fn.__doc__ or "") + "".join(docstr) output_doc = "" if output_type is None else _prepare_output_docstrings(output_type, config_class) built_doc = code_sample.format(**doc_kwargs) fn.__doc__ = func_doc + output_doc + built_doc return fn return docstring_decorator
null
11,638
import functools import re import types def _get_indent(t): """Returns the indentation in the first line of t""" search = re.search(r"^(\s*)\S", t) return "" if search is None else search.groups()[0] def _prepare_output_docstrings(output_type, config_class, min_indent=None): """ Prepares the return part of the docstring using `output_type`. """ output_docstring = output_type.__doc__ # Remove the head of the docstring to keep the list of args only lines = output_docstring.split("\n") i = 0 while i < len(lines) and re.search(r"^\s*(Args|Parameters):\s*$", lines[i]) is None: i += 1 if i < len(lines): params_docstring = "\n".join(lines[(i + 1) :]) params_docstring = _convert_output_args_doc(params_docstring) # Add the return introduction full_output_type = f"{output_type.__module__}.{output_type.__name__}" intro = TF_RETURN_INTRODUCTION if output_type.__name__.startswith("TF") else PT_RETURN_INTRODUCTION intro = intro.format(full_output_type=full_output_type, config_class=config_class) result = intro + params_docstring # Apply minimum indent if necessary if min_indent is not None: lines = result.split("\n") # Find the indent of the first nonempty line i = 0 while len(lines[i]) == 0: i += 1 indent = len(_get_indent(lines[i])) # If too small, add indentation to all nonempty lines if indent < min_indent: to_add = " " * (min_indent - indent) lines = [(f"{to_add}{line}" if len(line) > 0 else line) for line in lines] result = "\n".join(lines) return result def replace_return_docstrings(output_type=None, config_class=None): def docstring_decorator(fn): func_doc = fn.__doc__ lines = func_doc.split("\n") i = 0 while i < len(lines) and re.search(r"^\s*Returns?:\s*$", lines[i]) is None: i += 1 if i < len(lines): indent = len(_get_indent(lines[i])) lines[i] = _prepare_output_docstrings(output_type, config_class, min_indent=indent) func_doc = "\n".join(lines) else: raise ValueError( f"The function {fn} should have an empty 'Return:' or 'Returns:' in its docstring as placeholder, " f"current docstring is:\n{func_doc}" ) fn.__doc__ = func_doc return fn return docstring_decorator
null
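A sketch of the expected call shape. `MyOutput` and `MyConfig` are invented for illustration, and this only runs when the full docstring helpers (`_convert_output_args_doc` and the `*_RETURN_INTRODUCTION` templates used by `_prepare_output_docstrings`) are available:

class MyOutput:  # hypothetical output class for illustration only
    """
    Container for model outputs.

    Args:
        logits (`torch.FloatTensor`):
            Classification scores.
    """

@replace_return_docstrings(output_type=MyOutput, config_class="MyConfig")  # hypothetical names
def forward(self):
    """
    Runs the model.

    Returns:
    """

The decorator fills in the empty `Returns:` placeholder in place; if the placeholder is missing, it raises a `ValueError`.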
11,639
import functools import re import types The provided code snippet includes necessary dependencies for implementing the `copy_func` function. Write a Python function `def copy_func(f)` to solve the following problem: Returns a copy of a function f. Here is the function: def copy_func(f): """Returns a copy of a function f.""" # Based on http://stackoverflow.com/a/6528148/190597 (Glenn Maynard) g = types.FunctionType(f.__code__, f.__globals__, name=f.__name__, argdefs=f.__defaults__, closure=f.__closure__) g = functools.update_wrapper(g, f) g.__kwdefaults__ = f.__kwdefaults__ return g
Returns a copy of a function f.
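A sketch of why the copy is useful: both functions share the same code object and defaults, but the copy's metadata can be edited without touching the original.

def greet(name="world"):
    """Original docstring."""
    return f"hello {name}"

greet2 = copy_func(greet)
greet2.__doc__ = "Patched docstring."

assert greet() == greet2() == "hello world"   # same code object and defaults
assert greet.__doc__ != greet2.__doc__        # metadata can diverge safely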
11,640
from ..utils import DummyObject, requires_backends def load_tf_weights_in_tapas(*args, **kwargs): requires_backends(load_tf_weights_in_tapas, ["scatter"])
null
11,641
import builtins import collections import functools import inspect import math import operator import os import random import warnings from typing import Any, Callable, Dict, List, Optional, Type, Union import torch from packaging import version from torch import nn from torch.fx import Graph, GraphModule, Proxy, Tracer from torch.fx.proxy import ParameterProxy from .. import PretrainedConfig, PreTrainedModel, logging from ..models.auto import get_values from ..models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available from ..utils.versions import importlib_metadata MODEL_MAPPING_NAMES = OrderedDict( [ # Base model mapping ("albert", "AlbertModel"), ("bart", "BartModel"), ("beit", "BeitModel"), ("bert", "BertModel"), ("bert-generation", "BertGenerationEncoder"), ("big_bird", "BigBirdModel"), ("bigbird_pegasus", "BigBirdPegasusModel"), ("blenderbot", "BlenderbotModel"), ("blenderbot-small", "BlenderbotSmallModel"), ("bloom", "BloomModel"), ("camembert", "CamembertModel"), ("canine", "CanineModel"), ("clip", "CLIPModel"), ("codegen", "CodeGenModel"), ("conditional_detr", "ConditionalDetrModel"), ("convbert", "ConvBertModel"), ("convnext", "ConvNextModel"), ("ctrl", "CTRLModel"), ("cvt", "CvtModel"), ("data2vec-audio", "Data2VecAudioModel"), ("data2vec-text", "Data2VecTextModel"), ("data2vec-vision", "Data2VecVisionModel"), ("deberta", "DebertaModel"), ("deberta-v2", "DebertaV2Model"), ("decision_transformer", "DecisionTransformerModel"), ("decision_transformer_gpt2", "DecisionTransformerGPT2Model"), ("deformable_detr", "DeformableDetrModel"), ("deit", "DeiTModel"), ("detr", "DetrModel"), ("distilbert", "DistilBertModel"), ("donut-swin", "DonutSwinModel"), ("dpr", "DPRQuestionEncoder"), ("dpt", "DPTModel"), ("electra", "ElectraModel"), ("ernie", "ErnieModel"), ("esm", "EsmModel"), ("flaubert", "FlaubertModel"), ("flava", "FlavaModel"), ("fnet", "FNetModel"), ("fsmt", "FSMTModel"), ("funnel", ("FunnelModel", "FunnelBaseModel")), ("glpn", "GLPNModel"), ("gpt2", "GPT2Model"), ("gpt_neo", "GPTNeoModel"), ("gpt_neox", "GPTNeoXModel"), ("gpt_neox_japanese", "GPTNeoXJapaneseModel"), ("gptj", "GPTJModel"), ("groupvit", "GroupViTModel"), ("hubert", "HubertModel"), ("ibert", "IBertModel"), ("imagegpt", "ImageGPTModel"), ("layoutlm", "LayoutLMModel"), ("layoutlmv2", "LayoutLMv2Model"), ("layoutlmv3", "LayoutLMv3Model"), ("led", "LEDModel"), ("levit", "LevitModel"), ("lilt", "LiltModel"), ("longformer", "LongformerModel"), ("longt5", "LongT5Model"), ("luke", "LukeModel"), ("lxmert", "LxmertModel"), ("m2m_100", "M2M100Model"), ("marian", "MarianModel"), ("markuplm", "MarkupLMModel"), ("maskformer", "MaskFormerModel"), ("mbart", "MBartModel"), ("mctct", "MCTCTModel"), ("megatron-bert", "MegatronBertModel"), ("mobilebert", 
"MobileBertModel"), ("mobilevit", "MobileViTModel"), ("mpnet", "MPNetModel"), ("mt5", "MT5Model"), ("mvp", "MvpModel"), ("nezha", "NezhaModel"), ("nllb", "M2M100Model"), ("nystromformer", "NystromformerModel"), ("openai-gpt", "OpenAIGPTModel"), ("opt", "OPTModel"), ("owlvit", "OwlViTModel"), ("pegasus", "PegasusModel"), ("pegasus_x", "PegasusXModel"), ("perceiver", "PerceiverModel"), ("plbart", "PLBartModel"), ("poolformer", "PoolFormerModel"), ("prophetnet", "ProphetNetModel"), ("qdqbert", "QDQBertModel"), ("reformer", "ReformerModel"), ("regnet", "RegNetModel"), ("rembert", "RemBertModel"), ("resnet", "ResNetModel"), ("retribert", "RetriBertModel"), ("roberta", "RobertaModel"), ("roformer", "RoFormerModel"), ("segformer", "SegformerModel"), ("sew", "SEWModel"), ("sew-d", "SEWDModel"), ("speech_to_text", "Speech2TextModel"), ("splinter", "SplinterModel"), ("squeezebert", "SqueezeBertModel"), ("swin", "SwinModel"), ("swinv2", "Swinv2Model"), ("t5", "T5Model"), ("table-transformer", "TableTransformerModel"), ("tapas", "TapasModel"), ("time_series_transformer", "TimeSeriesTransformerModel"), ("trajectory_transformer", "TrajectoryTransformerModel"), ("transfo-xl", "TransfoXLModel"), ("unispeech", "UniSpeechModel"), ("unispeech-sat", "UniSpeechSatModel"), ("van", "VanModel"), ("videomae", "VideoMAEModel"), ("vilt", "ViltModel"), ("vision-text-dual-encoder", "VisionTextDualEncoderModel"), ("visual_bert", "VisualBertModel"), ("vit", "ViTModel"), ("vit_mae", "ViTMAEModel"), ("vit_msn", "ViTMSNModel"), ("wav2vec2", "Wav2Vec2Model"), ("wav2vec2-conformer", "Wav2Vec2ConformerModel"), ("wavlm", "WavLMModel"), ("whisper", "WhisperModel"), ("xclip", "XCLIPModel"), ("xglm", "XGLMModel"), ("xlm", "XLMModel"), ("xlm-prophetnet", "XLMProphetNetModel"), ("xlm-roberta", "XLMRobertaModel"), ("xlm-roberta-xl", "XLMRobertaXLModel"), ("xlnet", "XLNetModel"), ("yolos", "YolosModel"), ("yoso", "YosoModel"), ] ) MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict( [ # Model for pre-training mapping ("albert", "AlbertForPreTraining"), ("bart", "BartForConditionalGeneration"), ("bert", "BertForPreTraining"), ("big_bird", "BigBirdForPreTraining"), ("bloom", "BloomForCausalLM"), ("camembert", "CamembertForMaskedLM"), ("ctrl", "CTRLLMHeadModel"), ("data2vec-text", "Data2VecTextForMaskedLM"), ("deberta", "DebertaForMaskedLM"), ("deberta-v2", "DebertaV2ForMaskedLM"), ("distilbert", "DistilBertForMaskedLM"), ("electra", "ElectraForPreTraining"), ("ernie", "ErnieForPreTraining"), ("flaubert", "FlaubertWithLMHeadModel"), ("flava", "FlavaForPreTraining"), ("fnet", "FNetForPreTraining"), ("fsmt", "FSMTForConditionalGeneration"), ("funnel", "FunnelForPreTraining"), ("gpt2", "GPT2LMHeadModel"), ("ibert", "IBertForMaskedLM"), ("layoutlm", "LayoutLMForMaskedLM"), ("longformer", "LongformerForMaskedLM"), ("luke", "LukeForMaskedLM"), ("lxmert", "LxmertForPreTraining"), ("megatron-bert", "MegatronBertForPreTraining"), ("mobilebert", "MobileBertForPreTraining"), ("mpnet", "MPNetForMaskedLM"), ("mvp", "MvpForConditionalGeneration"), ("nezha", "NezhaForPreTraining"), ("openai-gpt", "OpenAIGPTLMHeadModel"), ("retribert", "RetriBertModel"), ("roberta", "RobertaForMaskedLM"), ("splinter", "SplinterForPreTraining"), ("squeezebert", "SqueezeBertForMaskedLM"), ("t5", "T5ForConditionalGeneration"), ("tapas", "TapasForMaskedLM"), ("transfo-xl", "TransfoXLLMHeadModel"), ("unispeech", "UniSpeechForPreTraining"), ("unispeech-sat", "UniSpeechSatForPreTraining"), ("videomae", "VideoMAEForPreTraining"), ("visual_bert", "VisualBertForPreTraining"), 
("vit_mae", "ViTMAEForPreTraining"), ("wav2vec2", "Wav2Vec2ForPreTraining"), ("wav2vec2-conformer", "Wav2Vec2ConformerForPreTraining"), ("xlm", "XLMWithLMHeadModel"), ("xlm-roberta", "XLMRobertaForMaskedLM"), ("xlm-roberta-xl", "XLMRobertaXLForMaskedLM"), ("xlnet", "XLNetLMHeadModel"), ] ) MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict( [ # Model for Causal LM mapping ("bart", "BartForCausalLM"), ("bert", "BertLMHeadModel"), ("bert-generation", "BertGenerationDecoder"), ("big_bird", "BigBirdForCausalLM"), ("bigbird_pegasus", "BigBirdPegasusForCausalLM"), ("blenderbot", "BlenderbotForCausalLM"), ("blenderbot-small", "BlenderbotSmallForCausalLM"), ("bloom", "BloomForCausalLM"), ("camembert", "CamembertForCausalLM"), ("codegen", "CodeGenForCausalLM"), ("ctrl", "CTRLLMHeadModel"), ("data2vec-text", "Data2VecTextForCausalLM"), ("electra", "ElectraForCausalLM"), ("ernie", "ErnieForCausalLM"), ("gpt2", "GPT2LMHeadModel"), ("gpt_neo", "GPTNeoForCausalLM"), ("gpt_neox", "GPTNeoXForCausalLM"), ("gpt_neox_japanese", "GPTNeoXJapaneseForCausalLM"), ("gptj", "GPTJForCausalLM"), ("marian", "MarianForCausalLM"), ("mbart", "MBartForCausalLM"), ("megatron-bert", "MegatronBertForCausalLM"), ("mvp", "MvpForCausalLM"), ("openai-gpt", "OpenAIGPTLMHeadModel"), ("opt", "OPTForCausalLM"), ("pegasus", "PegasusForCausalLM"), ("plbart", "PLBartForCausalLM"), ("prophetnet", "ProphetNetForCausalLM"), ("qdqbert", "QDQBertLMHeadModel"), ("reformer", "ReformerModelWithLMHead"), ("rembert", "RemBertForCausalLM"), ("roberta", "RobertaForCausalLM"), ("roformer", "RoFormerForCausalLM"), ("speech_to_text_2", "Speech2Text2ForCausalLM"), ("transfo-xl", "TransfoXLLMHeadModel"), ("trocr", "TrOCRForCausalLM"), ("xglm", "XGLMForCausalLM"), ("xlm", "XLMWithLMHeadModel"), ("xlm-prophetnet", "XLMProphetNetForCausalLM"), ("xlm-roberta", "XLMRobertaForCausalLM"), ("xlm-roberta-xl", "XLMRobertaXLForCausalLM"), ("xlnet", "XLNetLMHeadModel"), ] ) MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES = OrderedDict( [ ("deit", "DeiTForMaskedImageModeling"), ("swin", "SwinForMaskedImageModeling"), ("swinv2", "Swinv2ForMaskedImageModeling"), ("vit", "ViTForMaskedImageModeling"), ] ) MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict( [ # Model for Image Classification mapping ("beit", "BeitForImageClassification"), ("convnext", "ConvNextForImageClassification"), ("cvt", "CvtForImageClassification"), ("data2vec-vision", "Data2VecVisionForImageClassification"), ("deit", ("DeiTForImageClassification", "DeiTForImageClassificationWithTeacher")), ("imagegpt", "ImageGPTForImageClassification"), ("levit", ("LevitForImageClassification", "LevitForImageClassificationWithTeacher")), ("mobilevit", "MobileViTForImageClassification"), ( "perceiver", ( "PerceiverForImageClassificationLearned", "PerceiverForImageClassificationFourier", "PerceiverForImageClassificationConvProcessing", ), ), ("poolformer", "PoolFormerForImageClassification"), ("regnet", "RegNetForImageClassification"), ("resnet", "ResNetForImageClassification"), ("segformer", "SegformerForImageClassification"), ("swin", "SwinForImageClassification"), ("swinv2", "Swinv2ForImageClassification"), ("van", "VanForImageClassification"), ("vit", "ViTForImageClassification"), ("vit_msn", "ViTMSNForImageClassification"), ] ) MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES = OrderedDict( [ # Model for Semantic Segmentation mapping ("beit", "BeitForSemanticSegmentation"), ("data2vec-vision", "Data2VecVisionForSemanticSegmentation"), ("dpt", "DPTForSemanticSegmentation"), ("mobilevit", 
"MobileViTForSemanticSegmentation"), ("segformer", "SegformerForSemanticSegmentation"), ] ) MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict( [ # Model for Masked LM mapping ("albert", "AlbertForMaskedLM"), ("bart", "BartForConditionalGeneration"), ("bert", "BertForMaskedLM"), ("big_bird", "BigBirdForMaskedLM"), ("camembert", "CamembertForMaskedLM"), ("convbert", "ConvBertForMaskedLM"), ("data2vec-text", "Data2VecTextForMaskedLM"), ("deberta", "DebertaForMaskedLM"), ("deberta-v2", "DebertaV2ForMaskedLM"), ("distilbert", "DistilBertForMaskedLM"), ("electra", "ElectraForMaskedLM"), ("ernie", "ErnieForMaskedLM"), ("flaubert", "FlaubertWithLMHeadModel"), ("fnet", "FNetForMaskedLM"), ("funnel", "FunnelForMaskedLM"), ("ibert", "IBertForMaskedLM"), ("layoutlm", "LayoutLMForMaskedLM"), ("longformer", "LongformerForMaskedLM"), ("luke", "LukeForMaskedLM"), ("mbart", "MBartForConditionalGeneration"), ("megatron-bert", "MegatronBertForMaskedLM"), ("mobilebert", "MobileBertForMaskedLM"), ("mpnet", "MPNetForMaskedLM"), ("mvp", "MvpForConditionalGeneration"), ("nezha", "NezhaForMaskedLM"), ("nystromformer", "NystromformerForMaskedLM"), ("perceiver", "PerceiverForMaskedLM"), ("qdqbert", "QDQBertForMaskedLM"), ("reformer", "ReformerForMaskedLM"), ("rembert", "RemBertForMaskedLM"), ("roberta", "RobertaForMaskedLM"), ("roformer", "RoFormerForMaskedLM"), ("squeezebert", "SqueezeBertForMaskedLM"), ("tapas", "TapasForMaskedLM"), ("wav2vec2", "Wav2Vec2ForMaskedLM"), ("xlm", "XLMWithLMHeadModel"), ("xlm-roberta", "XLMRobertaForMaskedLM"), ("xlm-roberta-xl", "XLMRobertaXLForMaskedLM"), ("yoso", "YosoForMaskedLM"), ] ) MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("bart", "BartForConditionalGeneration"), ("bigbird_pegasus", "BigBirdPegasusForConditionalGeneration"), ("blenderbot", "BlenderbotForConditionalGeneration"), ("blenderbot-small", "BlenderbotSmallForConditionalGeneration"), ("encoder-decoder", "EncoderDecoderModel"), ("fsmt", "FSMTForConditionalGeneration"), ("led", "LEDForConditionalGeneration"), ("longt5", "LongT5ForConditionalGeneration"), ("m2m_100", "M2M100ForConditionalGeneration"), ("marian", "MarianMTModel"), ("mbart", "MBartForConditionalGeneration"), ("mt5", "MT5ForConditionalGeneration"), ("mvp", "MvpForConditionalGeneration"), ("nllb", "M2M100ForConditionalGeneration"), ("pegasus", "PegasusForConditionalGeneration"), ("pegasus_x", "PegasusXForConditionalGeneration"), ("plbart", "PLBartForConditionalGeneration"), ("prophetnet", "ProphetNetForConditionalGeneration"), ("t5", "T5ForConditionalGeneration"), ("xlm-prophetnet", "XLMProphetNetForConditionalGeneration"), ] ) MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict( [ ("speech-encoder-decoder", "SpeechEncoderDecoderModel"), ("speech_to_text", "Speech2TextForConditionalGeneration"), ("whisper", "WhisperForConditionalGeneration"), ] ) MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict( [ # Model for Sequence Classification mapping ("albert", "AlbertForSequenceClassification"), ("bart", "BartForSequenceClassification"), ("bert", "BertForSequenceClassification"), ("big_bird", "BigBirdForSequenceClassification"), ("bigbird_pegasus", "BigBirdPegasusForSequenceClassification"), ("bloom", "BloomForSequenceClassification"), ("camembert", "CamembertForSequenceClassification"), ("canine", "CanineForSequenceClassification"), ("convbert", "ConvBertForSequenceClassification"), ("ctrl", "CTRLForSequenceClassification"), ("data2vec-text", "Data2VecTextForSequenceClassification"), ("deberta", 
"DebertaForSequenceClassification"), ("deberta-v2", "DebertaV2ForSequenceClassification"), ("distilbert", "DistilBertForSequenceClassification"), ("electra", "ElectraForSequenceClassification"), ("ernie", "ErnieForSequenceClassification"), ("esm", "EsmForSequenceClassification"), ("flaubert", "FlaubertForSequenceClassification"), ("fnet", "FNetForSequenceClassification"), ("funnel", "FunnelForSequenceClassification"), ("gpt2", "GPT2ForSequenceClassification"), ("gpt_neo", "GPTNeoForSequenceClassification"), ("gptj", "GPTJForSequenceClassification"), ("ibert", "IBertForSequenceClassification"), ("layoutlm", "LayoutLMForSequenceClassification"), ("layoutlmv2", "LayoutLMv2ForSequenceClassification"), ("layoutlmv3", "LayoutLMv3ForSequenceClassification"), ("led", "LEDForSequenceClassification"), ("lilt", "LiltForSequenceClassification"), ("longformer", "LongformerForSequenceClassification"), ("luke", "LukeForSequenceClassification"), ("markuplm", "MarkupLMForSequenceClassification"), ("mbart", "MBartForSequenceClassification"), ("megatron-bert", "MegatronBertForSequenceClassification"), ("mobilebert", "MobileBertForSequenceClassification"), ("mpnet", "MPNetForSequenceClassification"), ("mvp", "MvpForSequenceClassification"), ("nezha", "NezhaForSequenceClassification"), ("nystromformer", "NystromformerForSequenceClassification"), ("openai-gpt", "OpenAIGPTForSequenceClassification"), ("opt", "OPTForSequenceClassification"), ("perceiver", "PerceiverForSequenceClassification"), ("plbart", "PLBartForSequenceClassification"), ("qdqbert", "QDQBertForSequenceClassification"), ("reformer", "ReformerForSequenceClassification"), ("rembert", "RemBertForSequenceClassification"), ("roberta", "RobertaForSequenceClassification"), ("roformer", "RoFormerForSequenceClassification"), ("squeezebert", "SqueezeBertForSequenceClassification"), ("tapas", "TapasForSequenceClassification"), ("transfo-xl", "TransfoXLForSequenceClassification"), ("xlm", "XLMForSequenceClassification"), ("xlm-roberta", "XLMRobertaForSequenceClassification"), ("xlm-roberta-xl", "XLMRobertaXLForSequenceClassification"), ("xlnet", "XLNetForSequenceClassification"), ("yoso", "YosoForSequenceClassification"), ] ) MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( [ # Model for Question Answering mapping ("albert", "AlbertForQuestionAnswering"), ("bart", "BartForQuestionAnswering"), ("bert", "BertForQuestionAnswering"), ("big_bird", "BigBirdForQuestionAnswering"), ("bigbird_pegasus", "BigBirdPegasusForQuestionAnswering"), ("bloom", "BloomForQuestionAnswering"), ("camembert", "CamembertForQuestionAnswering"), ("canine", "CanineForQuestionAnswering"), ("convbert", "ConvBertForQuestionAnswering"), ("data2vec-text", "Data2VecTextForQuestionAnswering"), ("deberta", "DebertaForQuestionAnswering"), ("deberta-v2", "DebertaV2ForQuestionAnswering"), ("distilbert", "DistilBertForQuestionAnswering"), ("electra", "ElectraForQuestionAnswering"), ("ernie", "ErnieForQuestionAnswering"), ("flaubert", "FlaubertForQuestionAnsweringSimple"), ("fnet", "FNetForQuestionAnswering"), ("funnel", "FunnelForQuestionAnswering"), ("gptj", "GPTJForQuestionAnswering"), ("ibert", "IBertForQuestionAnswering"), ("layoutlmv2", "LayoutLMv2ForQuestionAnswering"), ("layoutlmv3", "LayoutLMv3ForQuestionAnswering"), ("led", "LEDForQuestionAnswering"), ("lilt", "LiltForQuestionAnswering"), ("longformer", "LongformerForQuestionAnswering"), ("luke", "LukeForQuestionAnswering"), ("lxmert", "LxmertForQuestionAnswering"), ("markuplm", "MarkupLMForQuestionAnswering"), ("mbart", 
"MBartForQuestionAnswering"), ("megatron-bert", "MegatronBertForQuestionAnswering"), ("mobilebert", "MobileBertForQuestionAnswering"), ("mpnet", "MPNetForQuestionAnswering"), ("mvp", "MvpForQuestionAnswering"), ("nezha", "NezhaForQuestionAnswering"), ("nystromformer", "NystromformerForQuestionAnswering"), ("opt", "OPTForQuestionAnswering"), ("qdqbert", "QDQBertForQuestionAnswering"), ("reformer", "ReformerForQuestionAnswering"), ("rembert", "RemBertForQuestionAnswering"), ("roberta", "RobertaForQuestionAnswering"), ("roformer", "RoFormerForQuestionAnswering"), ("splinter", "SplinterForQuestionAnswering"), ("squeezebert", "SqueezeBertForQuestionAnswering"), ("xlm", "XLMForQuestionAnsweringSimple"), ("xlm-roberta", "XLMRobertaForQuestionAnswering"), ("xlm-roberta-xl", "XLMRobertaXLForQuestionAnswering"), ("xlnet", "XLNetForQuestionAnsweringSimple"), ("yoso", "YosoForQuestionAnswering"), ] ) MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( [ ("layoutlm", "LayoutLMForQuestionAnswering"), ("layoutlmv2", "LayoutLMv2ForQuestionAnswering"), ("layoutlmv3", "LayoutLMv3ForQuestionAnswering"), ] ) MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict( [ # Model for Token Classification mapping ("albert", "AlbertForTokenClassification"), ("bert", "BertForTokenClassification"), ("big_bird", "BigBirdForTokenClassification"), ("bloom", "BloomForTokenClassification"), ("camembert", "CamembertForTokenClassification"), ("canine", "CanineForTokenClassification"), ("convbert", "ConvBertForTokenClassification"), ("data2vec-text", "Data2VecTextForTokenClassification"), ("deberta", "DebertaForTokenClassification"), ("deberta-v2", "DebertaV2ForTokenClassification"), ("distilbert", "DistilBertForTokenClassification"), ("electra", "ElectraForTokenClassification"), ("ernie", "ErnieForTokenClassification"), ("esm", "EsmForTokenClassification"), ("flaubert", "FlaubertForTokenClassification"), ("fnet", "FNetForTokenClassification"), ("funnel", "FunnelForTokenClassification"), ("gpt2", "GPT2ForTokenClassification"), ("ibert", "IBertForTokenClassification"), ("layoutlm", "LayoutLMForTokenClassification"), ("layoutlmv2", "LayoutLMv2ForTokenClassification"), ("layoutlmv3", "LayoutLMv3ForTokenClassification"), ("lilt", "LiltForTokenClassification"), ("longformer", "LongformerForTokenClassification"), ("luke", "LukeForTokenClassification"), ("markuplm", "MarkupLMForTokenClassification"), ("megatron-bert", "MegatronBertForTokenClassification"), ("mobilebert", "MobileBertForTokenClassification"), ("mpnet", "MPNetForTokenClassification"), ("nezha", "NezhaForTokenClassification"), ("nystromformer", "NystromformerForTokenClassification"), ("qdqbert", "QDQBertForTokenClassification"), ("rembert", "RemBertForTokenClassification"), ("roberta", "RobertaForTokenClassification"), ("roformer", "RoFormerForTokenClassification"), ("squeezebert", "SqueezeBertForTokenClassification"), ("xlm", "XLMForTokenClassification"), ("xlm-roberta", "XLMRobertaForTokenClassification"), ("xlm-roberta-xl", "XLMRobertaXLForTokenClassification"), ("xlnet", "XLNetForTokenClassification"), ("yoso", "YosoForTokenClassification"), ] ) MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict( [ # Model for Multiple Choice mapping ("albert", "AlbertForMultipleChoice"), ("bert", "BertForMultipleChoice"), ("big_bird", "BigBirdForMultipleChoice"), ("camembert", "CamembertForMultipleChoice"), ("canine", "CanineForMultipleChoice"), ("convbert", "ConvBertForMultipleChoice"), ("data2vec-text", "Data2VecTextForMultipleChoice"), ("deberta-v2", 
"DebertaV2ForMultipleChoice"), ("distilbert", "DistilBertForMultipleChoice"), ("electra", "ElectraForMultipleChoice"), ("ernie", "ErnieForMultipleChoice"), ("flaubert", "FlaubertForMultipleChoice"), ("fnet", "FNetForMultipleChoice"), ("funnel", "FunnelForMultipleChoice"), ("ibert", "IBertForMultipleChoice"), ("longformer", "LongformerForMultipleChoice"), ("luke", "LukeForMultipleChoice"), ("megatron-bert", "MegatronBertForMultipleChoice"), ("mobilebert", "MobileBertForMultipleChoice"), ("mpnet", "MPNetForMultipleChoice"), ("nezha", "NezhaForMultipleChoice"), ("nystromformer", "NystromformerForMultipleChoice"), ("qdqbert", "QDQBertForMultipleChoice"), ("rembert", "RemBertForMultipleChoice"), ("roberta", "RobertaForMultipleChoice"), ("roformer", "RoFormerForMultipleChoice"), ("squeezebert", "SqueezeBertForMultipleChoice"), ("xlm", "XLMForMultipleChoice"), ("xlm-roberta", "XLMRobertaForMultipleChoice"), ("xlm-roberta-xl", "XLMRobertaXLForMultipleChoice"), ("xlnet", "XLNetForMultipleChoice"), ("yoso", "YosoForMultipleChoice"), ] ) MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict( [ ("bert", "BertForNextSentencePrediction"), ("ernie", "ErnieForNextSentencePrediction"), ("fnet", "FNetForNextSentencePrediction"), ("megatron-bert", "MegatronBertForNextSentencePrediction"), ("mobilebert", "MobileBertForNextSentencePrediction"), ("nezha", "NezhaForNextSentencePrediction"), ("qdqbert", "QDQBertForNextSentencePrediction"), ] ) MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict( [ # Model for Audio Classification mapping ("data2vec-audio", "Data2VecAudioForSequenceClassification"), ("hubert", "HubertForSequenceClassification"), ("sew", "SEWForSequenceClassification"), ("sew-d", "SEWDForSequenceClassification"), ("unispeech", "UniSpeechForSequenceClassification"), ("unispeech-sat", "UniSpeechSatForSequenceClassification"), ("wav2vec2", "Wav2Vec2ForSequenceClassification"), ("wav2vec2-conformer", "Wav2Vec2ConformerForSequenceClassification"), ("wavlm", "WavLMForSequenceClassification"), ] ) MODEL_FOR_CTC_MAPPING_NAMES = OrderedDict( [ # Model for Connectionist temporal classification (CTC) mapping ("data2vec-audio", "Data2VecAudioForCTC"), ("hubert", "HubertForCTC"), ("mctct", "MCTCTForCTC"), ("sew", "SEWForCTC"), ("sew-d", "SEWDForCTC"), ("unispeech", "UniSpeechForCTC"), ("unispeech-sat", "UniSpeechSatForCTC"), ("wav2vec2", "Wav2Vec2ForCTC"), ("wav2vec2-conformer", "Wav2Vec2ConformerForCTC"), ("wavlm", "WavLMForCTC"), ] ) def _generate_supported_model_class_names( model_name: Type[PretrainedConfig], supported_tasks: Optional[Union[str, List[str]]] = None, ) -> List[str]: task_mapping = { "default": MODEL_MAPPING_NAMES, "pretraining": MODEL_FOR_PRETRAINING_MAPPING_NAMES, "next-sentence-prediction": MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, "masked-lm": MODEL_FOR_MASKED_LM_MAPPING_NAMES, "causal-lm": MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, "seq2seq-lm": MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, "speech-seq2seq": MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, "multiple-choice": MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, "document-question-answering": MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, "question-answering": MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, "sequence-classification": MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, "token-classification": MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, "masked-image-modeling": MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, "image-classification": MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, "ctc": MODEL_FOR_CTC_MAPPING_NAMES, 
"audio-classification": MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, "semantic-segmentation": MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, } if supported_tasks is None: supported_tasks = task_mapping.keys() if isinstance(supported_tasks, str): supported_tasks = [supported_tasks] model_class_names = [] for task in supported_tasks: class_name = task_mapping[task].get(model_name, None) if class_name: model_class_names.append(class_name) return model_class_names
null
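A small sketch of how the lookup behaves for a couple of architectures; the results follow directly from the mappings above.

# Resolve the fx-traceable class names registered for one architecture.
print(_generate_supported_model_class_names("bert", supported_tasks="masked-lm"))
# ['BertForMaskedLM']

print(_generate_supported_model_class_names("wav2vec2", supported_tasks=["ctc", "audio-classification"]))
# ['Wav2Vec2ForCTC', 'Wav2Vec2ForSequenceClassification']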
11,642
import builtins import collections import functools import inspect import math import operator import os import random import warnings from typing import Any, Callable, Dict, List, Optional, Type, Union import torch from packaging import version from torch import nn from torch.fx import Graph, GraphModule, Proxy, Tracer from torch.fx.proxy import ParameterProxy from .. import PretrainedConfig, PreTrainedModel, logging from ..models.auto import get_values from ..models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available from ..utils.versions import importlib_metadata try: import torch _torch_available = True except ImportError: _torch_available = False try: from torch.hub import _get_torch_home torch_cache_home = _get_torch_home() except ImportError: torch_cache_home = os.path.expanduser( os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) ) def torch_nn_embedding(self, input): return torch.empty(*input.shape, self.weight.shape[-1], device="meta")
null
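A sketch of the shape-only semantics: the override never reads real weights or indices, it just appends the embedding dimension to the input shape.

import torch
from torch import nn

emb = nn.Embedding(30522, 768)
ids = torch.zeros(2, 16, dtype=torch.long, device="meta")
print(torch_nn_embedding(emb, ids).shape)  # torch.Size([2, 16, 768])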
11,643
import builtins import collections import functools import inspect import math import operator import os import random import warnings from typing import Any, Callable, Dict, List, Optional, Type, Union import torch from packaging import version from torch import nn from torch.fx import Graph, GraphModule, Proxy, Tracer from torch.fx.proxy import ParameterProxy from .. import PretrainedConfig, PreTrainedModel, logging from ..models.auto import get_values from ..models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available from ..utils.versions import importlib_metadata try: import torch _torch_available = True except ImportError: _torch_available = False try: from torch.hub import _get_torch_home torch_cache_home = _get_torch_home() except ImportError: torch_cache_home = os.path.expanduser( os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) ) def torch_nn_functional_embedding( input, weight, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False ): return torch.empty(*input.shape, weight.shape[-1], device="meta")
null
11,644
import builtins import collections import functools import inspect import math import operator import os import random import warnings from typing import Any, Callable, Dict, List, Optional, Type, Union import torch from packaging import version from torch import nn from torch.fx import Graph, GraphModule, Proxy, Tracer from torch.fx.proxy import ParameterProxy from .. import PretrainedConfig, PreTrainedModel, logging from ..models.auto import get_values from ..models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available from ..utils.versions import importlib_metadata def torch_nn_layernorm(self, input): return input
null
11,645
import builtins import collections import functools import inspect import math import operator import os import random import warnings from typing import Any, Callable, Dict, List, Optional, Type, Union import torch from packaging import version from torch import nn from torch.fx import Graph, GraphModule, Proxy, Tracer from torch.fx.proxy import ParameterProxy from .. import PretrainedConfig, PreTrainedModel, logging from ..models.auto import get_values from ..models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available from ..utils.versions import importlib_metadata def torch_nn_groupnorm(self, input): return input
null
11,646
import builtins import collections import functools import inspect import math import operator import os import random import warnings from typing import Any, Callable, Dict, List, Optional, Type, Union import torch from packaging import version from torch import nn from torch.fx import Graph, GraphModule, Proxy, Tracer from torch.fx.proxy import ParameterProxy from .. import PretrainedConfig, PreTrainedModel, logging from ..models.auto import get_values from ..models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available from ..utils.versions import importlib_metadata try: import torch _torch_available = True except ImportError: _torch_available = False try: from torch.hub import _get_torch_home torch_cache_home = _get_torch_home() except ImportError: torch_cache_home = os.path.expanduser( os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) ) def torch_nn_linear(self, input): return torch.empty(input.shape[:-1] + (self.out_features,), device="meta")
null
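A sketch of the linear override: only the last dimension changes, to `out_features`.

import torch
from torch import nn

lin = nn.Linear(768, 3072)
x = torch.empty(4, 128, 768, device="meta")
print(torch_nn_linear(lin, x).shape)  # torch.Size([4, 128, 3072])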
11,647
import builtins import collections import functools import inspect import math import operator import os import random import warnings from typing import Any, Callable, Dict, List, Optional, Type, Union import torch from packaging import version from torch import nn from torch.fx import Graph, GraphModule, Proxy, Tracer from torch.fx.proxy import ParameterProxy from .. import PretrainedConfig, PreTrainedModel, logging from ..models.auto import get_values from ..models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available from ..utils.versions import importlib_metadata def torch_relu(x): return x
null
11,648
import builtins import collections import functools import inspect import math import operator import os import random import warnings from typing import Any, Callable, Dict, List, Optional, Type, Union import torch from packaging import version from torch import nn from torch.fx import Graph, GraphModule, Proxy, Tracer from torch.fx.proxy import ParameterProxy from .. import PretrainedConfig, PreTrainedModel, logging from ..models.auto import get_values from ..models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available from ..utils.versions import importlib_metadata def torch_nn_relu(self, x): return x
null
11,649
import builtins
import collections
import functools
import inspect
import math
import operator
import os
import random
import warnings
from typing import Any, Callable, Dict, List, Optional, Type, Union
import torch
from packaging import version
from torch import nn
from torch.fx import Graph, GraphModule, Proxy, Tracer
from torch.fx.proxy import ParameterProxy
from .. import PretrainedConfig, PreTrainedModel, logging
from ..models.auto import get_values
from ..models.auto.modeling_auto import (
    MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    MODEL_FOR_CTC_MAPPING_NAMES,
    MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES,
    MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES,
    MODEL_FOR_PRETRAINING_MAPPING_NAMES,
    MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES,
    MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    MODEL_MAPPING_NAMES,
)
from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available
from ..utils.versions import importlib_metadata

def torch_nn_functional_relu(x, inplace=False):
    # ReLU is shape-preserving, so the meta override just returns its input.
    # In-place variants are the unsupported case, since MetaTensor analysis
    # only tracks shapes and does not model mutation.
    if inplace:
        raise ValueError("Don't support in-place functional.relu for MetaTensor analysis")
    return x
null
11,650
import builtins import collections import functools import inspect import math import operator import os import random import warnings from typing import Any, Callable, Dict, List, Optional, Type, Union import torch from packaging import version from torch import nn from torch.fx import Graph, GraphModule, Proxy, Tracer from torch.fx.proxy import ParameterProxy from .. import PretrainedConfig, PreTrainedModel, logging from ..models.auto import get_values from ..models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available from ..utils.versions import importlib_metadata def torch_where(condition, x, y): # torch.where returns the broadcasted tensor of condition, x, and y, # so hack it by using addition return condition.to(device="meta") + x.to(device="meta") + y.to(device="meta")
null
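A sketch of the broadcasting trick: adding the three meta tensors produces exactly the shape `torch.where` would return, without touching any data.

import torch

cond = torch.empty(4, 1, dtype=torch.bool, device="meta")
x = torch.empty(1, 8, device="meta")
y = torch.empty(4, 8, device="meta")
print(torch_where(cond, x, y).shape)  # torch.Size([4, 8])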
11,651
import builtins import collections import functools import inspect import math import operator import os import random import warnings from typing import Any, Callable, Dict, List, Optional, Type, Union import torch from packaging import version from torch import nn from torch.fx import Graph, GraphModule, Proxy, Tracer from torch.fx.proxy import ParameterProxy from .. import PretrainedConfig, PreTrainedModel, logging from ..models.auto import get_values from ..models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available from ..utils.versions import importlib_metadata def torch_abs(input, *, out=None): if out is not None: raise ValueError("Don't support in-place abs for MetaTensor analysis") return input
null
11,652
import builtins
import collections
import functools
import inspect
import math
import operator
import os
import random
import warnings
from typing import Any, Callable, Dict, List, Optional, Type, Union
import torch
from packaging import version
from torch import nn
from torch.fx import Graph, GraphModule, Proxy, Tracer
from torch.fx.proxy import ParameterProxy
from .. import PretrainedConfig, PreTrainedModel, logging
from ..models.auto import get_values
from ..models.auto.modeling_auto import (
    MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    MODEL_FOR_CTC_MAPPING_NAMES,
    MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES,
    MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES,
    MODEL_FOR_PRETRAINING_MAPPING_NAMES,
    MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES,
    MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    MODEL_MAPPING_NAMES,
)
from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available
from ..utils.versions import importlib_metadata

try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False

try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )

def torch_arange(*args, **kwargs):
    # Normalize the (start, end, step) signature of torch.arange.
    n = len(args)
    step = 1
    if n == 1:
        start = 0
        end = args[0]
    elif n == 2:
        start, end = args
    else:
        start, end, step = args
    if isinstance(start, float):
        start = int(start)
    if isinstance(end, float):
        end = int(end)
    if isinstance(step, float):
        step = int(step)
    step = kwargs.get("step", step)
    dtype = kwargs.get("dtype")
    # Only the element count matters for shape inference on the meta device.
    return torch.empty((end - start) // step, dtype=dtype, device="meta")
null
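A sketch of the shape inference: only the element count `(end - start) // step` matters on the meta device.

import torch

print(torch_arange(0, 10, 2).shape)  # torch.Size([5])
print(torch_arange(7).shape)         # torch.Size([7])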
11,653
import builtins import collections import functools import inspect import math import operator import os import random import warnings from typing import Any, Callable, Dict, List, Optional, Type, Union import torch from packaging import version from torch import nn from torch.fx import Graph, GraphModule, Proxy, Tracer from torch.fx.proxy import ParameterProxy from .. import PretrainedConfig, PreTrainedModel, logging from ..models.auto import get_values from ..models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available from ..utils.versions import importlib_metadata try: import torch _torch_available = True except ImportError: _torch_available = False try: from torch.hub import _get_torch_home torch_cache_home = _get_torch_home() except ImportError: torch_cache_home = os.path.expanduser( os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) ) def torch_cat(tensors, dim=None, axis=None, *, out=None): if dim is None and axis is None: dim = 0 if dim is None and axis is not None: dim = axis if dim < 0: dim = tensors[0].dim() + dim shapes = [t.shape for t in tensors] shape = list(shapes[0]) concatenated_dim = sum(shape[dim] for shape in shapes) final_shape = shape[:dim] + [concatenated_dim] + shape[dim + 1 :] return torch.empty(final_shape, device="meta")
null
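A sketch of the concatenation rule: sizes are summed along the concatenation dimension, and negative dims are normalized first.

import torch

a = torch.empty(2, 3, device="meta")
b = torch.empty(5, 3, device="meta")
print(torch_cat([a, b], dim=0).shape)   # torch.Size([7, 3])
print(torch_cat([a, a], dim=-1).shape)  # torch.Size([2, 6])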
11,654
import builtins import collections import functools import inspect import math import operator import os import random import warnings from typing import Any, Callable, Dict, List, Optional, Type, Union import torch from packaging import version from torch import nn from torch.fx import Graph, GraphModule, Proxy, Tracer from torch.fx.proxy import ParameterProxy from .. import PretrainedConfig, PreTrainedModel, logging from ..models.auto import get_values from ..models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available from ..utils.versions import importlib_metadata try: import torch _torch_available = True except ImportError: _torch_available = False try: from torch.hub import _get_torch_home torch_cache_home = _get_torch_home() except ImportError: torch_cache_home = os.path.expanduser( os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) ) def torch_stack(tensors, dim=None, axis=None, *, out=None): if dim is None and axis is None: dim = 0 if dim is None and axis is not None: dim = axis if dim < 0: dim = tensors[0].dim() + 1 + dim shape = list(tensors[0].shape) shape.insert(dim, len(tensors)) return torch.empty(shape, device="meta")
null
11,655
import builtins
import collections
import functools
import inspect
import math
import operator
import os
import random
import warnings
from typing import Any, Callable, Dict, List, Optional, Type, Union

import torch
from packaging import version
from torch import nn
from torch.fx import Graph, GraphModule, Proxy, Tracer
from torch.fx.proxy import ParameterProxy

from .. import PretrainedConfig, PreTrainedModel, logging
from ..models.auto import get_values
from ..models.auto.modeling_auto import (
    MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    MODEL_FOR_CTC_MAPPING_NAMES,
    MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES,
    MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES,
    MODEL_FOR_PRETRAINING_MAPPING_NAMES,
    MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES,
    MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    MODEL_MAPPING_NAMES,
)
from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available
from ..utils.versions import importlib_metadata


def torch_mul(input, other, *, out=None):
    # Elementwise multiplication broadcasts its operands, so the output shape is the
    # broadcast of the two input shapes; scalar operands take the other operand's shape.
    # (The original body was empty; this broadcast-based rule is a reconstruction and
    # assumes torch >= 1.8 for torch.broadcast_shapes.)
    if not isinstance(input, torch.Tensor):
        return torch.empty_like(other, device="meta")
    if not isinstance(other, torch.Tensor):
        return torch.empty_like(input, device="meta")
    return torch.empty(torch.broadcast_shapes(input.shape, other.shape), device="meta")


def torch_tensor_mul(self, other):
    return torch_mul(self, other)
null
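Broadcasting determines the output shape here; a quick check against the broadcast-based body above (illustrative):

```python
import torch

a = torch.empty(4, 1, 5, device="meta")
b = torch.empty(3, 1, device="meta")
# (4, 1, 5) * (3, 1) broadcasts to (4, 3, 5)
print(torch_mul(a, b).shape)  # torch.Size([4, 3, 5])
```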
11,656
import builtins
import collections
import functools
import inspect
import math
import operator
import os
import random
import warnings
from typing import Any, Callable, Dict, List, Optional, Type, Union

import torch
from packaging import version
from torch import nn
from torch.fx import Graph, GraphModule, Proxy, Tracer
from torch.fx.proxy import ParameterProxy

from .. import PretrainedConfig, PreTrainedModel, logging
from ..models.auto import get_values
from ..models.auto.modeling_auto import (
    MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    MODEL_FOR_CTC_MAPPING_NAMES,
    MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES,
    MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES,
    MODEL_FOR_PRETRAINING_MAPPING_NAMES,
    MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES,
    MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    MODEL_MAPPING_NAMES,
)
from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available
from ..utils.versions import importlib_metadata


def torch_matmul(input, other, *, out=None):
    d1 = input.dim()
    d2 = other.dim()
    shape = None
    if d1 == 1 and d2 == 1:
        # 1-D x 1-D is a dot product, which produces a scalar.
        shape = None
    elif d1 == 2 and d2 == 2:
        shape = (input.size(0), other.size(1))
    elif d1 == 1 and d2 == 2:
        shape = (other.size(1),)
    elif d1 == 2 and d2 == 1:
        shape = (input.size(0),)
    else:
        max_length = max(input.dim(), other.dim())
        # Left-pad the shorter shape with -1 placeholders so the broadcast loop
        # below can compare the two shapes position by position.
        shape1 = [-1] * (max_length - d1) + list(input.shape)
        shape2 = [-1] * (max_length - d2) + list(other.shape)
        shape = []
        for i in range(max_length):
            shape.append(max(shape1[i], shape2[i]))
        shape[-2] = shape1[-2]
        shape[-1] = shape2[-1]
        if d1 == 1:
            shape.pop(-2)
        if d2 == 1:
            shape.pop(-1)
    if shape is None:
        return torch.tensor(0.0, device="meta")
    return torch.empty(*shape, device="meta")
null
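The dispatch above can be sanity-checked on meta tensors; both the plain 2-D case and the broadcast-batched fallback are exercised below (illustrative snippet):

```python
import torch

# 2-D x 2-D: (4, 3) @ (3, 5) -> (4, 5)
a = torch.empty(4, 3, device="meta")
b = torch.empty(3, 5, device="meta")
print(torch_matmul(a, b).shape)  # torch.Size([4, 5])

# Batched with broadcasting: (7, 4, 3) @ (3, 5) -> (7, 4, 5)
c = torch.empty(7, 4, 3, device="meta")
print(torch_matmul(c, b).shape)  # torch.Size([7, 4, 5])
```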
11,657
import builtins import collections import functools import inspect import math import operator import os import random import warnings from typing import Any, Callable, Dict, List, Optional, Type, Union import torch from packaging import version from torch import nn from torch.fx import Graph, GraphModule, Proxy, Tracer from torch.fx.proxy import ParameterProxy from .. import PretrainedConfig, PreTrainedModel, logging from ..models.auto import get_values from ..models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available from ..utils.versions import importlib_metadata def torch_baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None): if out is not None: raise ValueError("Don't support in-place baddbmm for MetaTensor analysis") return torch_bmm(batch1, batch2) def torch_tensor_baddbmm(self, batch1, batch2, *, beta=1, alpha=1, out=None): return torch_baddbmm(self, batch1, batch2, beta=beta, alpha=alpha, out=out)
null
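`torch_baddbmm` delegates to `torch_bmm`, which is not shown in this excerpt. A minimal sketch of a meta-shape rule for batched matrix multiplication, for reference only (the library's actual definition may differ):

```python
def torch_bmm(input, mat2, *, out=None):
    # (b, n, m) @ (b, m, p) -> (b, n, p); only the shape is propagated.
    if out is not None:
        raise ValueError("Don't support in-place bmm for MetaTensor analysis")
    batch_size, n, m = input.shape
    _, _, p = mat2.shape
    return torch.empty(batch_size, n, p, device="meta")
```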
11,658
import builtins import collections import functools import inspect import math import operator import os import random import warnings from typing import Any, Callable, Dict, List, Optional, Type, Union import torch from packaging import version from torch import nn from torch.fx import Graph, GraphModule, Proxy, Tracer from torch.fx.proxy import ParameterProxy from .. import PretrainedConfig, PreTrainedModel, logging from ..models.auto import get_values from ..models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available from ..utils.versions import importlib_metadata try: import torch _torch_available = True except ImportError: _torch_available = False try: from torch.hub import _get_torch_home torch_cache_home = _get_torch_home() except ImportError: torch_cache_home = os.path.expanduser( os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) ) def torch_einsum(equation, *operands): # TODO: infer shape without performing the computation, this might be quite hard. concrete_operands = (torch.empty_like(operand, device="cpu") for operand in operands) return torch.einsum(equation, *concrete_operands).to("meta")
null
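Because einsum output shapes are hard to derive symbolically, the override above runs the real `torch.einsum` on concrete CPU stand-ins and keeps only the resulting shape. A small illustration (assumes `torch_einsum` is in scope):

```python
import torch

q = torch.empty(2, 8, 16, device="meta")
k = torch.empty(2, 8, 16, device="meta")
# Attention-style contraction: (2, 8, 16) x (2, 8, 16) -> (2, 8, 8)
print(torch_einsum("bqd,bkd->bqk", q, k).shape)  # torch.Size([2, 8, 8])
```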
11,659
import builtins import collections import functools import inspect import math import operator import os import random import warnings from typing import Any, Callable, Dict, List, Optional, Type, Union import torch from packaging import version from torch import nn from torch.fx import Graph, GraphModule, Proxy, Tracer from torch.fx.proxy import ParameterProxy from .. import PretrainedConfig, PreTrainedModel, logging from ..models.auto import get_values from ..models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available from ..utils.versions import importlib_metadata try: import torch _torch_available = True except ImportError: _torch_available = False try: from torch.hub import _get_torch_home torch_cache_home = _get_torch_home() except ImportError: torch_cache_home = os.path.expanduser( os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) ) def torch_tensor_repeat(self, *sizes): shape = list(self.shape) for i, x in enumerate(sizes): shape[i] *= x return torch.empty(shape, device="meta")
null
11,660
import builtins import collections import functools import inspect import math import operator import os import random import warnings from typing import Any, Callable, Dict, List, Optional, Type, Union import torch from packaging import version from torch import nn from torch.fx import Graph, GraphModule, Proxy, Tracer from torch.fx.proxy import ParameterProxy from .. import PretrainedConfig, PreTrainedModel, logging from ..models.auto import get_values from ..models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available from ..utils.versions import importlib_metadata def torch_index_select(input, dim, index, *, out=None): shape = list(input.shape) shape[dim] = len(index) return torch.empty(*shape, device="meta") def torch_tensor_index_select(self, dim, index): return torch_index_select(self, dim, index)
null
11,661
import builtins import collections import functools import inspect import math import operator import os import random import warnings from typing import Any, Callable, Dict, List, Optional, Type, Union import torch from packaging import version from torch import nn from torch.fx import Graph, GraphModule, Proxy, Tracer from torch.fx.proxy import ParameterProxy from .. import PretrainedConfig, PreTrainedModel, logging from ..models.auto import get_values from ..models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available from ..utils.versions import importlib_metadata def torch_roll(input, shifts, dims=None): return input
null
11,662
import builtins import collections import functools import inspect import math import operator import os import random import warnings from typing import Any, Callable, Dict, List, Optional, Type, Union import torch from packaging import version from torch import nn from torch.fx import Graph, GraphModule, Proxy, Tracer from torch.fx.proxy import ParameterProxy from .. import PretrainedConfig, PreTrainedModel, logging from ..models.auto import get_values from ..models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available from ..utils.versions import importlib_metadata def torch_flip(input, dims): return input
null
11,663
import builtins import collections import functools import inspect import math import operator import os import random import warnings from typing import Any, Callable, Dict, List, Optional, Type, Union import torch from packaging import version from torch import nn from torch.fx import Graph, GraphModule, Proxy, Tracer from torch.fx.proxy import ParameterProxy from .. import PretrainedConfig, PreTrainedModel, logging from ..models.auto import get_values from ..models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available from ..utils.versions import importlib_metadata def torch_tensor_flip(self, dims): return self
null
11,664
import builtins import collections import functools import inspect import math import operator import os import random import warnings from typing import Any, Callable, Dict, List, Optional, Type, Union import torch from packaging import version from torch import nn from torch.fx import Graph, GraphModule, Proxy, Tracer from torch.fx.proxy import ParameterProxy from .. import PretrainedConfig, PreTrainedModel, logging from ..models.auto import get_values from ..models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available from ..utils.versions import importlib_metadata try: import torch _torch_available = True except ImportError: _torch_available = False try: from torch.hub import _get_torch_home torch_cache_home = _get_torch_home() except ImportError: torch_cache_home = os.path.expanduser( os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) ) def torch_nn_conv1d(self, input): l_in = input.shape[-1] shape = None padding = self.padding if padding == "valid": padding = (0, 0) if padding == "same": shape = list(input.shape) if shape is None: shape = list(input.shape) l_out = math.floor( (l_in + 2 * padding[0] - self.dilation[0] * (self.kernel_size[0] - 1) - 1) / self.stride[0] + 1 ) shape[-1] = l_out shape[-2] = self.out_channels return torch.empty(shape, device="meta")
null
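A worked instance of the `l_out` formula above: for `l_in = 16`, `kernel_size = 3`, `stride = 2`, `padding = 1`, `dilation = 1`, we get `l_out = floor((16 + 2 - 1*(3-1) - 1)/2 + 1) = floor(8.5) = 8`. The snippet below checks the override against a real `nn.Conv1d` (illustrative only):

```python
import torch
from torch import nn

conv = nn.Conv1d(in_channels=4, out_channels=6, kernel_size=3, stride=2, padding=1)
x = torch.empty(2, 4, 16, device="meta")
print(torch_nn_conv1d(conv, x).shape)     # torch.Size([2, 6, 8]) inferred on meta
print(conv(torch.randn(2, 4, 16)).shape)  # torch.Size([2, 6, 8]) on real data
```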
11,665
import builtins import collections import functools import inspect import math import operator import os import random import warnings from typing import Any, Callable, Dict, List, Optional, Type, Union import torch from packaging import version from torch import nn from torch.fx import Graph, GraphModule, Proxy, Tracer from torch.fx.proxy import ParameterProxy from .. import PretrainedConfig, PreTrainedModel, logging from ..models.auto import get_values from ..models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available from ..utils.versions import importlib_metadata try: import torch _torch_available = True except ImportError: _torch_available = False try: from torch.hub import _get_torch_home torch_cache_home = _get_torch_home() except ImportError: torch_cache_home = os.path.expanduser( os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) ) def torch_nn_conv2d(self, input): h_in, w_in = input.shape[-2:] shape = None padding = self.padding if padding == "valid": padding = (0, 0) if padding == "same": shape = list(input.shape) if shape is None: shape = list(input.shape) h_out = math.floor( (h_in + 2 * padding[0] - self.dilation[0] * (self.kernel_size[0] - 1) - 1) / self.stride[0] + 1 ) w_out = math.floor( (w_in + 2 * padding[1] - self.dilation[1] * (self.kernel_size[1] - 1) - 1) / self.stride[1] + 1 ) shape[-2:] = [h_out, w_out] shape[-3] = self.out_channels return torch.empty(shape, device="meta")
null
11,666
import builtins import collections import functools import inspect import math import operator import os import random import warnings from typing import Any, Callable, Dict, List, Optional, Type, Union import torch from packaging import version from torch import nn from torch.fx import Graph, GraphModule, Proxy, Tracer from torch.fx.proxy import ParameterProxy from .. import PretrainedConfig, PreTrainedModel, logging from ..models.auto import get_values from ..models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available from ..utils.versions import importlib_metadata def torch_squeeze(input, dim=None): shape = list(input.shape) if dim is not None: if dim < 0: dim = input.dim() + dim if shape[dim] == 1: shape.pop(dim) else: new_shape = [] for dim_value in shape: if dim_value == 1: continue new_shape.append(dim_value) shape = new_shape return torch.empty(shape, device="meta") def torch_tensor_squeeze(self, dim=None): return torch_squeeze(self, dim)
null
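The two code paths above (squeeze all size-1 dims vs. squeeze one dim) in action (illustrative):

```python
import torch

x = torch.empty(1, 5, 1, 7, device="meta")
print(torch_squeeze(x).shape)         # torch.Size([5, 7])    -- every size-1 dim removed
print(torch_squeeze(x, dim=0).shape)  # torch.Size([5, 1, 7]) -- only dim 0 removed
```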
11,667
import builtins import collections import functools import inspect import math import operator import os import random import warnings from typing import Any, Callable, Dict, List, Optional, Type, Union import torch from packaging import version from torch import nn from torch.fx import Graph, GraphModule, Proxy, Tracer from torch.fx.proxy import ParameterProxy from .. import PretrainedConfig, PreTrainedModel, logging from ..models.auto import get_values from ..models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available from ..utils.versions import importlib_metadata def torch_unsqueeze(input, dim): shape = list(input.shape) if dim < 0: dim = input.dim() + 1 + dim shape.insert(dim, 1) return torch.empty(shape, device="meta") def torch_tensor_unsqueeze(self, dim): return torch_unsqueeze(self, dim)
null
11,668
import builtins
import collections
import functools
import inspect
import math
import operator
import os
import random
import warnings
from typing import Any, Callable, Dict, List, Optional, Type, Union

import torch
from packaging import version
from torch import nn
from torch.fx import Graph, GraphModule, Proxy, Tracer
from torch.fx.proxy import ParameterProxy

from .. import PretrainedConfig, PreTrainedModel, logging
from ..models.auto import get_values
from ..models.auto.modeling_auto import (
    MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    MODEL_FOR_CTC_MAPPING_NAMES,
    MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES,
    MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES,
    MODEL_FOR_PRETRAINING_MAPPING_NAMES,
    MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES,
    MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    MODEL_MAPPING_NAMES,
)
from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available
from ..utils.versions import importlib_metadata


def torch_unique_consecutive(input, **kwargs):
    # Run the real op on a zero-filled CPU stand-in and move the result(s) to meta.
    output = torch.unique_consecutive(torch.zeros_like(input, device="cpu"), **kwargs)
    if isinstance(output, torch.Tensor):
        return output.to("meta")
    else:
        # With return_inverse/return_counts, the op returns a tuple of tensors.
        return tuple(map(lambda x: x.to("meta"), output))
null
11,669
import builtins import collections import functools import inspect import math import operator import os import random import warnings from typing import Any, Callable, Dict, List, Optional, Type, Union import torch from packaging import version from torch import nn from torch.fx import Graph, GraphModule, Proxy, Tracer from torch.fx.proxy import ParameterProxy from .. import PretrainedConfig, PreTrainedModel, logging from ..models.auto import get_values from ..models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available from ..utils.versions import importlib_metadata try: import torch _torch_available = True except ImportError: _torch_available = False try: from torch.hub import _get_torch_home torch_cache_home = _get_torch_home() except ImportError: torch_cache_home = os.path.expanduser( os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) ) def torch_nn_functional_one_hot(tensor, num_classes=-1): if num_classes < 0: raise ValueError("Don't support automatic num_classes inference for MetaTensor analysis") shape = list(tensor.shape) + [num_classes] return torch.empty(shape, device="meta")
null
11,670
import builtins import collections import functools import inspect import math import operator import os import random import warnings from typing import Any, Callable, Dict, List, Optional, Type, Union import torch from packaging import version from torch import nn from torch.fx import Graph, GraphModule, Proxy, Tracer from torch.fx.proxy import ParameterProxy from .. import PretrainedConfig, PreTrainedModel, logging from ..models.auto import get_values from ..models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available from ..utils.versions import importlib_metadata try: import torch _torch_available = True except ImportError: _torch_available = False try: from torch.hub import _get_torch_home torch_cache_home = _get_torch_home() except ImportError: torch_cache_home = os.path.expanduser( os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) ) def torch_nn_mseloss(self, input, target): if self.reduction == "none": shape = target.shape else: shape = (1,) return torch.empty(shape, device="meta")
null
11,671
import builtins import collections import functools import inspect import math import operator import os import random import warnings from typing import Any, Callable, Dict, List, Optional, Type, Union import torch from packaging import version from torch import nn from torch.fx import Graph, GraphModule, Proxy, Tracer from torch.fx.proxy import ParameterProxy from .. import PretrainedConfig, PreTrainedModel, logging from ..models.auto import get_values from ..models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available from ..utils.versions import importlib_metadata try: import torch _torch_available = True except ImportError: _torch_available = False try: from torch.hub import _get_torch_home torch_cache_home = _get_torch_home() except ImportError: torch_cache_home = os.path.expanduser( os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) ) def torch_nn_crossentropyloss(self, input, target): if self.reduction == "none": shape = target.shape else: shape = (1,) return torch.empty(shape, device="meta")
null
11,672
import builtins import collections import functools import inspect import math import operator import os import random import warnings from typing import Any, Callable, Dict, List, Optional, Type, Union import torch from packaging import version from torch import nn from torch.fx import Graph, GraphModule, Proxy, Tracer from torch.fx.proxy import ParameterProxy from .. import PretrainedConfig, PreTrainedModel, logging from ..models.auto import get_values from ..models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available from ..utils.versions import importlib_metadata try: import torch _torch_available = True except ImportError: _torch_available = False try: from torch.hub import _get_torch_home torch_cache_home = _get_torch_home() except ImportError: torch_cache_home = os.path.expanduser( os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) ) def torch_nn_bcewithlogitsloss(self, input, target): if self.reduction == "none": shape = target.shape else: shape = (1,) return torch.empty(shape, device="meta")
null
11,673
import builtins import collections import functools import inspect import math import operator import os import random import warnings from typing import Any, Callable, Dict, List, Optional, Type, Union import torch from packaging import version from torch import nn from torch.fx import Graph, GraphModule, Proxy, Tracer from torch.fx.proxy import ParameterProxy from .. import PretrainedConfig, PreTrainedModel, logging from ..models.auto import get_values from ..models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available from ..utils.versions import importlib_metadata try: import torch _torch_available = True except ImportError: _torch_available = False try: from torch.hub import _get_torch_home torch_cache_home = _get_torch_home() except ImportError: torch_cache_home = os.path.expanduser( os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) ) def operator_getitem(a, b): def to_concrete(t): if isinstance(t, torch.Tensor): concrete = torch.ones_like(t, device="cpu") if concrete.dtype in [torch.float16, torch.float32, torch.float64, torch.int32]: concrete = concrete.to(torch.int64) return concrete return t if isinstance(a, torch.Tensor): # TODO: infer shape without performing the computation. if isinstance(b, tuple): b = tuple(map(to_concrete, b)) else: b = to_concrete(b) return operator.getitem(torch.empty_like(a, device="cpu"), b).to("meta") return operator.getitem(a, b)
null
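The CPU round-trip above exists because the result shape of advanced indexing depends on the index values themselves, not just on their shapes, which is also why floating-point index tensors are materialized as concrete `int64` tensors. For instance (illustrative):

```python
import torch

x = torch.empty(4, 10, device="meta")
idx = torch.empty(3, dtype=torch.int64, device="meta")
# Advanced indexing: selecting 3 rows of a (4, 10) tensor -> (3, 10)
print(operator_getitem(x, idx).shape)  # torch.Size([3, 10])
```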
11,674
import builtins
import collections
import functools
import inspect
import math
import operator
import os
import random
import warnings
from typing import Any, Callable, Dict, List, Optional, Type, Union

import torch
from packaging import version
from torch import nn
from torch.fx import Graph, GraphModule, Proxy, Tracer
from torch.fx.proxy import ParameterProxy

from .. import PretrainedConfig, PreTrainedModel, logging
from ..models.auto import get_values
from ..models.auto.modeling_auto import (
    MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    MODEL_FOR_CTC_MAPPING_NAMES,
    MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES,
    MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES,
    MODEL_FOR_PRETRAINING_MAPPING_NAMES,
    MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES,
    MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    MODEL_MAPPING_NAMES,
)
from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available
from ..utils.versions import importlib_metadata


class HFProxy(Proxy):
    """
    Proxy that uses metadata to handle data-dependent control-flow.
    """

    def install_metadata(self, metadata):
        self._metadata = metadata

    # `shape`, `dtype` and `device` must be properties so that plain attribute
    # access on a proxy (e.g. `input_ids.shape`) resolves during tracing.
    @property
    def shape(self):
        return self.tracer.create_proxy("call_method", "size", (self,), {})

    @property
    def dtype(self):
        if hasattr(self, "_metadata") and self._metadata is not None:
            return self._metadata.dtype
        return self.tracer.create_proxy("call_function", builtins.getattr, (self, "dtype"), {})

    @property
    def device(self):
        # Hack so we can track when devices are used. During meta-tensor propagation,
        # replace these values with a constant 'meta'.
        return MetaDeviceAttribute(self, "device")

    def __len__(self):
        if hasattr(self, "_metadata") and self._metadata is not None:
            return len(self._metadata)
        return super().__len__()

    def __bool__(self):
        if hasattr(self, "_metadata") and self._metadata is not None:
            return self._metadata
        return super().__bool__()

    def __getattr__(self, k):
        if k == "_metadata":
            return self.__getattribute__(k)
        # note: not added to the graph yet, if this is a method call
        # we peephole optimize to the method invocation
        return HFAttribute(self, k)

    def __setitem__(self, indices, values):
        return self.tracer.create_proxy("call_function", operator.setitem, (self, indices, values), {})

    def __contains__(self, key):
        if hasattr(self, "_metadata") and self._metadata is not None:
            return key in self._metadata
        return super().__contains__(key)


class MetaDeviceAttribute(HFAttribute):
    pass


The provided code snippet includes necessary dependencies for implementing the `_proxies_to_metas` function. Write a Python function `def _proxies_to_metas(v)` to solve the following problem:
Returns the underlying metadata for HFProxies, and behaves like the identity for the others.
Here is the function: def _proxies_to_metas(v): """Returns the underlying metadata for HFProxies, and behaves like the identity for the others.""" if isinstance(v, MetaDeviceAttribute): return "meta" if isinstance(v, torch.fx.Proxy): if not (isinstance(v, HFProxy) and hasattr(v, "_metadata")): raise RuntimeError(f"No metadata was found for {v}") return v._metadata return v
Returns the underlying metadata for HFProxies, and behaves like the identity for the others.
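`HFProxy.__getattr__` and `MetaDeviceAttribute` both rely on an `HFAttribute` class that is not shown in this excerpt. A minimal sketch of what such a class needs to provide, assuming the surrounding tracer API (not necessarily the library's exact definition):

```python
class HFAttribute(HFProxy):
    # Represents `root.attr` lazily: most attribute accesses turn into method
    # calls, so the getattr node is only materialized if the node is needed.
    def __init__(self, root, attr: str):
        self.root = root
        self.attr = attr
        self.tracer = root.tracer
        self._node = None

    @property
    def node(self):
        if self._node is None:
            self._node = self.tracer.create_proxy("call_function", getattr, (self.root, self.attr), {}).node
        return self._node

    def __call__(self, *args, **kwargs):
        return self.tracer.create_proxy("call_method", self.attr, (self.root,) + args, kwargs)
```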
11,675
import builtins import collections import functools import inspect import math import operator import os import random import warnings from typing import Any, Callable, Dict, List, Optional, Type, Union import torch from packaging import version from torch import nn from torch.fx import Graph, GraphModule, Proxy, Tracer from torch.fx.proxy import ParameterProxy from .. import PretrainedConfig, PreTrainedModel, logging from ..models.auto import get_values from ..models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available from ..utils.versions import importlib_metadata try: import torch _torch_available = True except ImportError: _torch_available = False try: from torch.hub import _get_torch_home torch_cache_home = _get_torch_home() except ImportError: torch_cache_home = os.path.expanduser( os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) ) def _gen_constructor_wrapper(target): @functools.wraps(target) def wrapper(*args, **kwargs): proxy = None def check_has_proxy(v): if isinstance(v, Proxy): nonlocal proxy proxy = v torch.fx.node.map_aggregate(args, check_has_proxy) torch.fx.node.map_aggregate(kwargs, check_has_proxy) if proxy is not None: return proxy.tracer.create_proxy("call_function", target, args, kwargs) else: return target(*args, **kwargs) return wrapper, target
null
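To make the wrapper's effect concrete: once a constructor such as `torch.ones` is swapped for its wrapped version during tracing, a call whose arguments contain a `Proxy` is recorded as a `call_function` graph node instead of being executed eagerly. A hypothetical illustration:

```python
wrapper, orig = _gen_constructor_wrapper(torch.ones)

t = wrapper(2, 3)  # no Proxy arguments -> executes eagerly, returns a real (2, 3) tensor
# During HFTracer.trace, `torch.ones` is temporarily replaced by `wrapper`, so a call
# like `torch.ones(input_ids.shape[0])` inside a model's forward becomes a graph node.
```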
11,676
import builtins import collections import functools import inspect import math import operator import os import random import warnings from typing import Any, Callable, Dict, List, Optional, Type, Union import torch from packaging import version from torch import nn from torch.fx import Graph, GraphModule, Proxy, Tracer from torch.fx.proxy import ParameterProxy from .. import PretrainedConfig, PreTrainedModel, logging from ..models.auto import get_values from ..models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available from ..utils.versions import importlib_metadata def _generate_random_int(low: int = 10, high: int = 20, forbidden_values: Optional[List[int]] = None): if forbidden_values is None: forbidden_values = [] value = random.randint(low, high) while value in forbidden_values: value = random.randint(low, high) return value
null
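The helper above picks dummy batch and sequence sizes; the `forbidden_values` parameter presumably exists so that two axes never share a size, which keeps shape propagation from conflating them. For example:

```python
batch_size = _generate_random_int()                            # e.g. 13
seq_len = _generate_random_int(forbidden_values=[batch_size])  # guaranteed != batch_size
```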
11,677
import builtins import collections import functools import inspect import math import operator import os import random import warnings from typing import Any, Callable, Dict, List, Optional, Type, Union import torch from packaging import version from torch import nn from torch.fx import Graph, GraphModule, Proxy, Tracer from torch.fx.proxy import ParameterProxy from .. import PretrainedConfig, PreTrainedModel, logging from ..models.auto import get_values from ..models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available from ..utils.versions import importlib_metadata class HFTracer(Tracer): """ Tracer that is able to symbolically trace models from the library. To do that, it uses the HFProxy instead of the regular PyTorch torch.fx.Proxy. """ # Feature flag for proxying accesses to buffer values proxy_buffer_attributes: bool = True allow_insert_stateless_mods: bool = True _TORCH_METHODS_TO_PATCH = ["arange", "zeros", "ones", "full", "full_like", "eye", "empty", "tensor"] def __init__(self, autowrap_modules=(math,), autowrap_functions=()): super().__init__(autowrap_modules=autowrap_modules, autowrap_functions=autowrap_functions) if not is_torch_fx_available(): torch_version = version.parse(importlib_metadata.version("torch")) raise ImportError( f"Found an incompatible version of torch. Found version {torch_version}, but only version " f"{TORCH_FX_REQUIRED_VERSION} is supported." ) def _generate_dummy_input( self, model: PreTrainedModel, input_name: str, shape: List[int] ) -> Dict[str, torch.Tensor]: """Generates dummy input for model inference recording.""" # Retrieving the model class, either from the "class_for_deserialization" attribute if the model was restored # from pickle, or from the "__class__" attribute in the general case. 
model_class_name = getattr(model, "class_for_deserialization", model.__class__).__name__ device = model.device inputs_dict = {} if input_name in ["labels", "start_positions", "end_positions"]: batch_size = shape[0] if model_class_name in [ *get_values(MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES), *get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES), *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES), ]: inputs_dict["labels"] = torch.zeros(batch_size, dtype=torch.long, device=device) elif model_class_name in [ *get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES), *get_values(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES), "XLNetForQuestionAnswering", ]: inputs_dict["start_positions"] = torch.zeros(batch_size, dtype=torch.long, device=device) inputs_dict["end_positions"] = torch.zeros(batch_size, dtype=torch.long, device=device) elif model_class_name in get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES): if not hasattr(model.config, "problem_type") or model.config.problem_type is None: raise ValueError( "Could not retrieve the problem type for the sequence classification task, please set " 'model.config.problem_type to one of the following values: "regression", ' '"single_label_classification", or "multi_label_classification".' ) if model.config.problem_type == "regression": labels_shape = (batch_size, model.config.num_labels) labels_dtype = torch.float32 elif model.config.problem_type == "single_label_classification": labels_shape = (batch_size,) labels_dtype = torch.long elif model.config.problem_type == "multi_label_classification": labels_shape = (batch_size, model.config.num_labels) labels_dtype = torch.float32 else: raise ValueError( 'Expected model.config.problem_type to be either: "regression", "single_label_classification"' f', or "multi_label_classification", but "{model.config.problem_type}" was provided.' ) inputs_dict["labels"] = torch.zeros(*labels_shape, dtype=labels_dtype, device=device) elif model_class_name in [ *get_values(MODEL_FOR_PRETRAINING_MAPPING_NAMES), *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_CAUSAL_LM_MAPPING_NAMES), *get_values(MODEL_FOR_MASKED_LM_MAPPING_NAMES), *get_values(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES), *get_values(MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES), "GPT2DoubleHeadsModel", ]: inputs_dict["labels"] = torch.zeros(shape, dtype=torch.long, device=device) else: raise NotImplementedError( f"Generating the dummy input named {input_name} for {model_class_name} is not supported yet." ) elif "pixel_values" in input_name: batch_size = shape[0] image_size = getattr(model.config, "image_size", None) if image_size is None: if hasattr(model.config, "vision_config"): image_size = model.config.vision_config.image_size elif hasattr(model.config, "encoder"): image_size = model.config.encoder.image_size else: image_size = (_generate_random_int(), _generate_random_int()) # If no num_channels is in the config, use some arbitrary value. 
num_channels = getattr(model.config, "num_channels", 3) if not isinstance(image_size, collections.abc.Iterable): image_size = (image_size, image_size) height, width = image_size inputs_dict[input_name] = torch.zeros( batch_size, num_channels, height, width, dtype=torch.float32, device=device ) elif "bbox" in input_name: inputs_dict[input_name] = torch.zeros(*shape, 4, dtype=torch.float, device=device) elif "input_features" in input_name: inputs_dict[input_name] = torch.zeros( *shape, model.config.input_feat_per_channel, dtype=torch.float, device=device ) elif "visual_feats" in input_name: inputs_dict[input_name] = torch.zeros( shape + [ model.config.visual_feat_dim, ], dtype=torch.float, device=device, ) elif "visual_pos" in input_name: inputs_dict[input_name] = torch.zeros( shape + [ model.config.visual_pos_dim, ], dtype=torch.float, device=device, ) elif "inputs" in input_name: inputs_dict[input_name] = torch.zeros(*shape, dtype=torch.float, device=device) elif "input_values" in input_name: batch_size, _ = shape # Generating big sequence length for audio inputs. seq_length = _generate_random_int(low=10000, high=20000) inputs_dict[input_name] = torch.zeros(batch_size, seq_length, dtype=torch.float, device=device) elif "mask" in input_name or "ids" in input_name: inputs_dict[input_name] = torch.zeros(shape, dtype=torch.long, device=device) else: shape_with_hidden_size = shape + [model.config.hidden_size] inputs_dict[input_name] = torch.zeros(shape_with_hidden_size, dtype=torch.float, device=device) return inputs_dict def create_proxy(self, kind, target, args, kwargs, name=None, type_expr=None, proxy_factory_fn=None): rv = super().create_proxy(kind, target, args, kwargs, name, type_expr, proxy_factory_fn) if kind == "placeholder" and target in self.meta_args: rv.install_metadata(self.meta_args[target]) return rv if target in self.orig_fns: # NOTE: tensor constructors in PyTorch define the `device` argument as # *kwargs-only*. That is why this works. If you add methods to # _TORCH_METHODS_TO_PATCH that do not define `device` as kwarg-only, # this will break and you will likely see issues where we cannot infer # the size of the output. 
if "device" in kwargs: kwargs["device"] = "meta" try: args_metas = torch.fx.node.map_aggregate(args, _proxies_to_metas) kwargs_metas = torch.fx.node.map_aggregate(kwargs, _proxies_to_metas) if kind == "call_function": meta_target = _MANUAL_META_OVERRIDES.get(target, target) meta_out = meta_target(*args_metas, **kwargs_metas) if isinstance(meta_out, torch.Tensor): meta_out = meta_out.to(device="meta") elif kind == "call_method": method = getattr(args_metas[0].__class__, target) meta_target = _MANUAL_META_OVERRIDES.get(method, method) meta_out = meta_target(*args_metas, **kwargs_metas) elif kind == "call_module": if not hasattr(self, "orig_forward"): raise AttributeError(f"{self} does not have an attribute called orig_forward") self._disable_module_getattr = True try: mod = self.root.get_submodule(target) mod_type = type(mod) if mod_type in _MANUAL_META_OVERRIDES: meta_out = _MANUAL_META_OVERRIDES[mod_type](mod, *args_metas, **kwargs_metas) else: meta_out = self.orig_forward(*args_metas, **kwargs_metas) finally: self._disable_module_getattr = False elif kind == "get_attr": self._disable_module_getattr = True try: attr_itr = self.root atoms = target.split(".") for atom in atoms: attr_itr = getattr(attr_itr, atom) if isinstance(attr_itr, torch.Tensor): meta_out = attr_itr.to(device="meta") else: meta_out = attr_itr finally: self._disable_module_getattr = False else: return rv if not isinstance(rv, Proxy): raise ValueError("Don't support composite output yet") rv.install_metadata(meta_out) except Exception as e: if _IS_IN_DEBUG_MODE: warnings.warn(f"Could not compute metadata for {kind} target {target}: {e}") return rv # Replaced by .getattr from PyTorch 1.13 def _module_getattr(self, attr, attr_val, parameter_proxy_cache): if getattr(self, "_disable_module_getattr", False): return attr_val else: def maybe_get_proxy_for_attr(attr_val, collection_to_search, parameter_proxy_cache): for n, p in collection_to_search: if attr_val is p: if n not in parameter_proxy_cache: kwargs = {} if "proxy_factory_fn" in inspect.signature(self.create_proxy).parameters: kwargs["proxy_factory_fn"] = ( None if not self.param_shapes_constant else lambda node: ParameterProxy(self, node, n, attr_val) ) val_proxy = self.create_proxy("get_attr", n, (), {}, **kwargs) # type: ignore[arg-type] parameter_proxy_cache[n] = val_proxy return parameter_proxy_cache[n] return None if isinstance(attr_val, torch.nn.Parameter): maybe_parameter_proxy = maybe_get_proxy_for_attr( attr_val, self.root.named_parameters(), parameter_proxy_cache ) if maybe_parameter_proxy is not None: return maybe_parameter_proxy if self.proxy_buffer_attributes and isinstance(attr_val, torch.Tensor): maybe_buffer_proxy = maybe_get_proxy_for_attr( attr_val, self.root.named_buffers(), parameter_proxy_cache ) if maybe_buffer_proxy is not None: return maybe_buffer_proxy return attr_val # Needed for PyTorch 1.13+ def getattr(self, attr: str, attr_val: Any, parameter_proxy_cache: Dict[str, Any]): return self._module_getattr(attr, attr_val, parameter_proxy_cache) def call_module(self, m, forward, args, kwargs): self.orig_forward = forward return super().call_module(m, forward, args, kwargs) def proxy(self, node): return HFProxy(node, self) def trace( self, root: Union[torch.nn.Module, Callable[..., Any]], concrete_args: Optional[Dict[str, Any]] = None, dummy_inputs: Optional[Dict[str, Any]] = None, complete_concrete_args_with_inputs_not_in_dummy_inputs: bool = True, ) -> Graph: """ Traces `root` and returns the corresponding FX `torch.fx.Graph` representation. 
        `root` can either be a `torch.nn.Module` instance or a Python callable. Note that after this call,
        `self.root` may be different from the `root` passed in here. For example, when a free function is passed to
        `trace()`, we will create a `torch.nn.Module` instance to use as the root and add embedded constants to.

        Args:
            root (`torch.nn.Module` or `Callable`):
                Either a `torch.nn.Module` or a function to be traced through. If root is not a
                [`~transformers.PreTrainedModel`], then `dummy_inputs` must be passed, otherwise tracing will fail.
            concrete_args (`Dict[str, Any]`, *optional*):
                Concrete arguments that should not be treated as Proxies.
            dummy_inputs (`Dict[str, Any]`, *optional*):
                The dummy inputs needed to handle data-dependent control-flow if `root` is not a
                [`~transformers.PreTrainedModel`]. It can also be used when `root` is a
                [`~transformers.PreTrainedModel`] to specify custom dummy inputs for a subset or all the model inputs.
            complete_concrete_args_with_inputs_not_in_dummy_inputs (`bool`, *optional*, defaults to `True`):
                If `True`, and `dummy_inputs` is specified, every argument that `root` can take that is not in
                `dummy_inputs` and not in `concrete_args` will be added to `concrete_args`, otherwise does nothing.

        Returns:
            `torch.fx.Graph`:
                An FX `torch.fx.Graph` representing the semantics of the passed-in `root`.

        """
        sig = inspect.signature(root.forward if isinstance(root, torch.nn.Module) else root)

        if concrete_args is None:
            concrete_args = {}

        if dummy_inputs is not None and complete_concrete_args_with_inputs_not_in_dummy_inputs:
            for param in sig.parameters.values():
                if param.name in dummy_inputs:
                    continue
                if param.default is inspect.Parameter.empty:
                    raise ValueError(f"You need to specify a default value for the parameter {param.name}.")
            concrete_args.update(
                {p.name: p.default for p in sig.parameters.values() if p.name not in dummy_inputs}
            )

        input_names = sig.parameters.keys() - concrete_args.keys()

        # Creating a random input shape to generate dummy inputs.
        batch_size = _generate_random_int()
        sequence_length = _generate_random_int()
        shape = [batch_size, sequence_length]

        if root.__class__.__name__ in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES):
            num_choices = _generate_random_int(low=2, high=5)
            shape.insert(1, num_choices)

        inputs = dict(dummy_inputs) if dummy_inputs is not None else {}
        for input_name in input_names:
            if input_name in inputs:
                continue
            # We enforce that root must either be a PreTrainedModel or deserialized from a serialized traced model
            # to be able to use HFTracer._generate_dummy_input.
            if isinstance(root, PreTrainedModel) or type(root).__qualname__.startswith("_deserialize_graph_module"):
                inputs.update(self._generate_dummy_input(root, input_name, shape))
            else:
                raise RuntimeError(
                    f"Could not generate input named {input_name} because root is not a"
                    " transformers.PreTrainedModel."
) concrete_metas = { input_name: input_.to("meta") if isinstance(input_, torch.Tensor) else input_ for input_name, input_ in inputs.items() } for param in sig.parameters.values(): if param.kind == inspect.Parameter.VAR_KEYWORD and param.name not in input_names: concrete_metas[f"**{param.name}"] = {} self.meta_args = concrete_metas self.patched_torch_methods = { target: _gen_constructor_wrapper(getattr(torch, target)) for target in self._TORCH_METHODS_TO_PATCH } self.orig_fns = set() for name, (wrapper, orig) in self.patched_torch_methods.items(): setattr(torch, name, wrapper) self.orig_fns.add(orig) try: self.graph = super().trace(root, concrete_args=concrete_args) finally: for name, (_, orig) in self.patched_torch_methods.items(): setattr(torch, name, orig) # This is necessary because concrete args are added as input to the traced module since # https://github.com/pytorch/pytorch/pull/55888. for node in self.graph.nodes: if node.op == "placeholder": # Removing default values for inputs as the forward pass will fail with them. if node.target in input_names: node.args = () # Without this, torch.jit.script fails because the inputs type is Optional[torch.Tensor]. # It cannot infer on the attributes and methods the input should have, and fails. node.type = torch.Tensor # It is a concrete arg so it is not used and should be removed. else: to_visit = [node] to_delete = collections.OrderedDict() while to_visit: n = to_visit.pop(0) to_delete[n] = None to_visit += list(n.users.keys()) for user in reversed(to_delete.keys()): self.graph.erase_node(user) # TODO: solves GraphModule creation. # Without this, return type annotation "Tuple" is causing code execution failure. if node.op == "output": node.type = None return self.graph def _stateless_mod_instanciation_depends_on_proxies(self, mod: nn.Module) -> bool: """ Whether the module was instantiated with Proxies. If that is the case, such module cannot be a leaf module because its attributes are input-dependent. """ return any(isinstance(attr, Proxy) for attr in mod.__dict__.values()) def _insert_module_as_submodule(self, mod: nn.Module) -> str: """ Helper method which tries to insert a module that was not declared as submodule. """ # If one of the module attributes is a Proxy, it means that its instantiation is input-dependent. # It is not possible to insert such modules, those should be traced through. if self._stateless_mod_instanciation_depends_on_proxies(mod): return "" idx = 0 mod_name = mod.__class__.__name__.lower() path = f"{mod_name}_{idx}" already_inserted = False while hasattr(self.root, path): if getattr(self.root, path) is mod: already_inserted = True break path = f"{mod_name}_{idx}" idx += 1 # No need to add multiple instances of the same module. if not already_inserted: self.root.add_module(path, mod) return path def path_of_module(self, mod: nn.Module) -> str: """ Helper method to find the qualified name of `mod` in the Module hierarchy of `root`. For example, if `root` has a submodule named `foo`, which has a submodule named `bar`, passing `bar` into this function will return the string "foo.bar". Args: mod (str): The `Module` to retrieve the qualified name for. 
""" try: return super().path_of_module(mod) except NameError as e: if self.allow_insert_stateless_mods and len(list(mod.parameters())) == 0 and len(list(mod.buffers())) == 0: path = self._insert_module_as_submodule(mod) return path raise e def is_leaf_module(self, m: torch.nn.Module, module_qualified_name: str) -> bool: return (not self._stateless_mod_instanciation_depends_on_proxies(m)) and super().is_leaf_module( m, module_qualified_name ) def get_concrete_args(model: nn.Module, input_names: List[str]): sig = inspect.signature(model.forward) if not (set(input_names) <= set(sig.parameters.keys())): formatted_input_names = input_names[0] if len(input_names) == 1 else ", ".join(input_names) formatted_allowed_input_names = ", ".join(sig.parameters.keys()) raise ValueError( f"The model does not have input(s) named: {formatted_input_names}, expected a subset of the following:" f" {formatted_allowed_input_names}" ) return {p.name: p.default for p in sig.parameters.values() if p.name not in input_names} def check_if_model_is_supported(model: PreTrainedModel): if model.__class__.__name__ not in _SUPPORTED_MODELS: supported_model_names = ", ".join(_SUPPORTED_MODELS) raise NotImplementedError( f"Model {model.__class__.__name__} is not supported yet, supported models: {supported_model_names}" ) try: import torch _torch_available = True except ImportError: _torch_available = False try: from torch.hub import _get_torch_home torch_cache_home = _get_torch_home() except ImportError: torch_cache_home = os.path.expanduser( os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) ) The provided code snippet includes necessary dependencies for implementing the `symbolic_trace` function. Write a Python function `def symbolic_trace( model: PreTrainedModel, input_names: Optional[List[str]] = None, disable_check: bool = False, ) -> GraphModule` to solve the following problem: Performs symbolic tracing on the model. Args: model ([`PretrainedModel`]): The model to trace. input_names (`List[str]`, *optional*): The names of the inputs of the traced model. If unset, model.dummy_inputs.keys() are used instead. disable_check (`bool`, *optional*, defaults to `False`): If `True`, no check is done before trying to trace the model, this is mostly usesul for debugging purposes. Returns: `torch.fx.GraphModule`: A GraphModule constructed by recording operations seen while tracing the model. Example: ```python from transformers.utils.fx import symbolic_trace traced_model = symbolic_trace(model, input_names=["input_ids", "attention_mask", "token_type_ids"]) ``` Here is the function: def symbolic_trace( model: PreTrainedModel, input_names: Optional[List[str]] = None, disable_check: bool = False, ) -> GraphModule: """ Performs symbolic tracing on the model. Args: model ([`PretrainedModel`]): The model to trace. input_names (`List[str]`, *optional*): The names of the inputs of the traced model. If unset, model.dummy_inputs.keys() are used instead. disable_check (`bool`, *optional*, defaults to `False`): If `True`, no check is done before trying to trace the model, this is mostly usesul for debugging purposes. Returns: `torch.fx.GraphModule`: A GraphModule constructed by recording operations seen while tracing the model. 
Example: ```python from transformers.utils.fx import symbolic_trace traced_model = symbolic_trace(model, input_names=["input_ids", "attention_mask", "token_type_ids"]) ``` """ if input_names is None: input_names = model.dummy_inputs.keys() input_names = list(input_names) concrete_args = get_concrete_args(model, input_names) if not disable_check: check_if_model_is_supported(model) # Tracing. tracer = HFTracer() traced_graph = tracer.trace(model, concrete_args=concrete_args) traced = torch.fx.GraphModule(model, traced_graph) traced.config = model.config # The model class must be stored as an attribute to allow model deserialization, which uses trace, and thus # _generate_dummy_input, where the model class is needed. traced.class_for_deserialization = model.__class__ traced.device = model.device return traced
Performs symbolic tracing on the model. Args: model ([`PreTrainedModel`]): The model to trace. input_names (`List[str]`, *optional*): The names of the inputs of the traced model. If unset, model.dummy_inputs.keys() are used instead. disable_check (`bool`, *optional*, defaults to `False`): If `True`, no check is done before trying to trace the model, this is mostly useful for debugging purposes. Returns: `torch.fx.GraphModule`: A GraphModule constructed by recording operations seen while tracing the model. Example: ```python from transformers.utils.fx import symbolic_trace traced_model = symbolic_trace(model, input_names=["input_ids", "attention_mask", "token_type_ids"]) ```
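A fuller end-to-end sketch for context (BERT is among the architectures supported by FX tracing in recent `transformers` releases; the checkpoint name is illustrative):

```python
from transformers import AutoModelForSequenceClassification
from transformers.utils.fx import symbolic_trace

# BERT-style models can be traced because their control flow does not depend on input values.
model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
traced = symbolic_trace(model, input_names=["input_ids", "attention_mask"])

# The result is a regular torch.fx.GraphModule: its graph can be printed,
# rewritten, or recompiled like any other FX graph.
print(traced.graph)
```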
11,678
from math import ceil

def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Each attention block must be assigned to exactly"
            " one device. These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map: "
            + str(extra_blocks)
        )
null
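A usage sketch (device ids and block counts are illustrative): the function passes silently on a complete, non-overlapping map and raises `ValueError` otherwise.

```python
# Valid: blocks 0..5 each appear exactly once across the two devices.
device_map = {0: [0, 1, 2], 1: [3, 4, 5]}
assert_device_map(device_map, num_blocks=6)  # no exception

# Invalid: block 2 is duplicated and block 5 is missing.
bad_map = {0: [0, 1, 2], 1: [2, 3, 4]}
# assert_device_map(bad_map, num_blocks=6)  # raises ValueError on the duplicate
```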
11,679
from math import ceil The provided code snippet includes necessary dependencies for implementing the `get_device_map` function. Write a Python function `def get_device_map(n_layers, devices)` to solve the following problem: Returns a dictionary of layers distributed evenly across all devices. Here is the function: def get_device_map(n_layers, devices): """Returns a dictionary of layers distributed evenly across all devices.""" layers = list(range(n_layers)) n_blocks = int(ceil(n_layers / len(devices))) layers_list = list(layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)) return dict(zip(devices, layers_list))
Returns a dictionary of layers distributed evenly across all devices.
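Two illustrative calls (device ids are arbitrary):

```python
# 12 layers split evenly across two devices.
get_device_map(n_layers=12, devices=[0, 1])
# -> {0: [0, 1, 2, 3, 4, 5], 1: [6, 7, 8, 9, 10, 11]}

# When the count does not divide evenly, earlier devices get ceil(n_layers / len(devices))
# layers and the last device receives the remainder.
get_device_map(n_layers=10, devices=[0, 1, 2])
# -> {0: [0, 1, 2, 3], 1: [4, 5, 6, 7], 2: [8, 9]}
```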
11,680
from copy import deepcopy

from transformers.utils import is_accelerate_available, is_bitsandbytes_available

if is_bitsandbytes_available():
    import bitsandbytes as bnb
    import torch.nn as nn

if is_accelerate_available():
    from accelerate import init_empty_weights

The provided code snippet includes necessary dependencies for implementing the `replace_8bit_linear` function. Write a Python function `def replace_8bit_linear(model, threshold=6.0, modules_to_not_convert="lm_head")` to solve the following problem:
A helper function to replace all `torch.nn.Linear` modules by `bnb.nn.Linear8bitLt` modules from the `bitsandbytes` library. This will enable running your models using mixed int8 precision as described by the paper `LLM.int8(): 8-bit Matrix Multiplication for Transformers at Scale`. Make sure `bitsandbytes` compiled with the correct CUDA version of your hardware is installed before running this function. `pip install -i https://test.pypi.org/simple/ bitsandbytes` The function will be run recursively and replace all `torch.nn.Linear` modules except for the `lm_head`, which should be kept as a `torch.nn.Linear` module. The replacement is done under the `init_empty_weights` context manager, so no CPU/GPU memory is required to run this function. Int8 mixed-precision matrix decomposition works by separating a matrix multiplication into two streams: (1) a systematic feature-outlier stream matrix-multiplied in fp16 (0.01%), and (2) a regular stream of int8 matrix multiplication (99.9%). With this method, int8 inference with no predictive degradation is possible for very large models (>=176B parameters). Parameters: model (`torch.nn.Module`): Input model or `torch.nn.Module` as the function is run recursively. threshold (`float`, *optional*, defaults to 6.0): `int8_threshold` for outlier detection as described in the aforementioned paper. This parameter is set to `6.0` as described by the paper. modules_to_not_convert (`str`, *optional*, defaults to `lm_head`): Name of the module not to convert to `Linear8bitLt`. In practice we keep the `lm_head` in full precision for numerical stability reasons.
Here is the function: def replace_8bit_linear(model, threshold=6.0, modules_to_not_convert="lm_head"):
    """
    A helper function to replace all `torch.nn.Linear` modules by `bnb.nn.Linear8bitLt` modules from the
    `bitsandbytes` library. This will enable running your models using mixed int8 precision as described by the paper
    `LLM.int8(): 8-bit Matrix Multiplication for Transformers at Scale`. Make sure `bitsandbytes` compiled with the
    correct CUDA version of your hardware is installed before running this function. `pip install -i
    https://test.pypi.org/simple/ bitsandbytes`

    The function will be run recursively and replace all `torch.nn.Linear` modules except for the `lm_head`, which
    should be kept as a `torch.nn.Linear` module. The replacement is done under the `init_empty_weights` context
    manager, so no CPU/GPU memory is required to run this function.

    Int8 mixed-precision matrix decomposition works by separating a matrix multiplication into two streams: (1) a
    systematic feature-outlier stream matrix-multiplied in fp16 (0.01%), and (2) a regular stream of int8 matrix
    multiplication (99.9%). With this method, int8 inference with no predictive degradation is possible for very
    large models (>=176B parameters).

    Parameters:
        model (`torch.nn.Module`):
            Input model or `torch.nn.Module` as the function is run recursively.
        threshold (`float`, *optional*, defaults to 6.0):
            `int8_threshold` for outlier detection as described in the aforementioned paper. This parameter is set to
            `6.0` as described by the paper.
        modules_to_not_convert (`str`, *optional*, defaults to `lm_head`):
            Name of the module not to convert to `Linear8bitLt`. In practice we keep the `lm_head` in full precision
            for numerical stability reasons.
    """
    for name, module in model.named_children():
        if len(list(module.children())) > 0:
            replace_8bit_linear(module, threshold, modules_to_not_convert)

        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            with init_empty_weights():
                model._modules[name] = bnb.nn.Linear8bitLt(
                    module.in_features,
                    module.out_features,
                    module.bias is not None,
                    has_fp16_weights=False,
                    threshold=threshold,
                )
    return model
A helper function to replace all `torch.nn.Linear` modules by `bnb.nn.Linear8bitLt` modules from the `bitsandbytes` library. This will enable running your models using mixed int8 precision as described by the paper `LLM.int8(): 8-bit Matrix Multiplication for Transformers at Scale`. Make sure `bitsandbytes` compiled with the correct CUDA version of your hardware is installed before running this function. `pip install -i https://test.pypi.org/simple/ bitsandbytes` The function will be run recursively and replace all `torch.nn.Linear` modules except for the `lm_head`, which should be kept as a `torch.nn.Linear` module. The replacement is done under the `init_empty_weights` context manager, so no CPU/GPU memory is required to run this function. Int8 mixed-precision matrix decomposition works by separating a matrix multiplication into two streams: (1) a systematic feature-outlier stream matrix-multiplied in fp16 (0.01%), and (2) a regular stream of int8 matrix multiplication (99.9%). With this method, int8 inference with no predictive degradation is possible for very large models (>=176B parameters). Parameters: model (`torch.nn.Module`): Input model or `torch.nn.Module` as the function is run recursively. threshold (`float`, *optional*, defaults to 6.0): `int8_threshold` for outlier detection as described in the aforementioned paper. This parameter is set to `6.0` as described by the paper. modules_to_not_convert (`str`, *optional*, defaults to `lm_head`): Name of the module not to convert to `Linear8bitLt`. In practice we keep the `lm_head` in full precision for numerical stability reasons.
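A hypothetical usage sketch (requires a CUDA build of `bitsandbytes`; the checkpoint name is illustrative):

```python
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("gpt2")
model = replace_8bit_linear(model, threshold=6.0, modules_to_not_convert="lm_head")
# Note: the new Linear8bitLt modules are created on the meta device by
# init_empty_weights, so the original weights still have to be loaded into them
# (e.g. via accelerate's dispatch utilities) before running inference.
```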
11,681
from copy import deepcopy

from transformers.utils import is_accelerate_available, is_bitsandbytes_available

if is_accelerate_available():
    from accelerate.utils import find_tied_parameters

The provided code snippet includes necessary dependencies for implementing the `get_keys_to_not_convert` function. Write a Python function `def get_keys_to_not_convert(model)` to solve the following problem:
r""" A utility function to get the keys of the modules to keep in full precision, if any. For example, for CausalLM modules we may want to keep the lm_head in full precision for numerical stability reasons. For other architectures, we want to keep the tied weights of the model. The function will return a list of the keys of the modules not to convert to int8. Parameters: model (`torch.nn.Module`): Input model
Here is the function: def get_keys_to_not_convert(model):
    r"""
    A utility function to get the keys of the modules to keep in full precision, if any. For example, for CausalLM
    modules we may want to keep the lm_head in full precision for numerical stability reasons. For other
    architectures, we want to keep the tied weights of the model. The function will return a list of the keys of the
    modules not to convert to int8.

    Parameters:
        model (`torch.nn.Module`):
            Input model
    """
    # Create a copy of the model and tie the weights, then
    # check if it contains tied weights
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_model.tie_weights()
    tied_keys = list(find_tied_parameters(tied_model).values())
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return ""

    # otherwise they have an attached head
    list_modules = list(model.named_parameters())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = tied_keys + list(intersection)

    return [module_name.split(".")[0] for module_name in list_untouched]
r""" An utility function to get the key of the module to keep in full precision if any For example for CausalLM modules we may want to keep the lm_head in full precision for numerical stability reasons. For other architectures, we want to keep the tied weights of the model. The function will return a list of the keys of the modules to not convert in int8. Parameters: model (`torch.nn.Module`): Input model
11,682
from ..utils import DummyObject, requires_backends def tf_top_k_top_p_filtering(*args, **kwargs): requires_backends(tf_top_k_top_p_filtering, ["tf"])
null
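These dummy objects let `import transformers` succeed when TensorFlow is not installed; the first actual call fails with an actionable message. A behavior sketch (assuming TensorFlow is absent from the environment; the exact error text is abbreviated):

```python
from transformers.utils.dummy_tf_objects import tf_top_k_top_p_filtering

tf_top_k_top_p_filtering()
# ImportError: tf_top_k_top_p_filtering requires the TensorFlow library ...
```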
11,683
from ..utils import DummyObject, requires_backends def shape_list(*args, **kwargs): requires_backends(shape_list, ["tf"])
null
11,684
from ..utils import DummyObject, requires_backends def create_optimizer(*args, **kwargs): requires_backends(create_optimizer, ["tf"])
null
11,685
import collections
import json
import logging
import os
from typing import Optional, Tuple

import numpy as np
from tqdm.auto import tqdm

logger = logging.getLogger(__name__)

The provided code snippet includes necessary dependencies for implementing the `postprocess_qa_predictions_with_beam_search` function. Write a Python function `def postprocess_qa_predictions_with_beam_search( examples, features, predictions: Tuple[np.ndarray, np.ndarray], version_2_with_negative: bool = False, n_best_size: int = 20, max_answer_length: int = 30, start_n_top: int = 5, end_n_top: int = 5, output_dir: Optional[str] = None, prefix: Optional[str] = None, log_level: Optional[int] = logging.WARNING, )` to solve the following problem:
Post-processes the predictions of a question-answering model with beam search to convert them to answers that are substrings of the original contexts. This is the postprocessing function for models that return start and end logits, indices, as well as cls token predictions. Args: examples: The non-preprocessed dataset (see the main script for more information). features: The processed dataset (see the main script for more information). predictions (:obj:`Tuple[np.ndarray, np.ndarray]`): The predictions of the model: five arrays containing the start top log probabilities, the start top indices, the end top log probabilities, the end top indices and the cls token logits, respectively. The first dimension of each must match the number of elements of :obj:`features`. version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not the underlying dataset contains examples with no answers. n_best_size (:obj:`int`, `optional`, defaults to 20): The total number of n-best predictions to generate when looking for an answer. max_answer_length (:obj:`int`, `optional`, defaults to 30): The maximum length of an answer that can be generated. This is needed because the start and end predictions are not conditioned on one another. start_n_top (:obj:`int`, `optional`, defaults to 5): The number of top start logits to keep when searching for the :obj:`n_best_size` predictions. end_n_top (:obj:`int`, `optional`, defaults to 5): The number of top end logits to keep when searching for the :obj:`n_best_size` predictions. output_dir (:obj:`str`, `optional`): If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if :obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null answers, are saved in `output_dir`. prefix (:obj:`str`, `optional`): If provided, the dictionaries mentioned above are saved with `prefix` added to their names. log_level (:obj:`int`, `optional`, defaults to ``logging.WARNING``): ``logging`` log level (e.g., ``logging.WARNING``)
Here is the function: def postprocess_qa_predictions_with_beam_search(
    examples,
    features,
    predictions: Tuple[np.ndarray, np.ndarray],
    version_2_with_negative: bool = False,
    n_best_size: int = 20,
    max_answer_length: int = 30,
    start_n_top: int = 5,
    end_n_top: int = 5,
    output_dir: Optional[str] = None,
    prefix: Optional[str] = None,
    log_level: Optional[int] = logging.WARNING,
):
    """
    Post-processes the predictions of a question-answering model with beam search to convert them to answers that are
    substrings of the original contexts. This is the postprocessing function for models that return start and end
    logits, indices, as well as cls token predictions.

    Args:
        examples: The non-preprocessed dataset (see the main script for more information).
        features: The processed dataset (see the main script for more information).
        predictions (:obj:`Tuple[np.ndarray, np.ndarray]`):
            The predictions of the model: five arrays containing the start top log probabilities, the start top
            indices, the end top log probabilities, the end top indices and the cls token logits, respectively. The
            first dimension of each must match the number of elements of :obj:`features`.
        version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not the underlying dataset contains examples with no answers.
        n_best_size (:obj:`int`, `optional`, defaults to 20):
            The total number of n-best predictions to generate when looking for an answer.
        max_answer_length (:obj:`int`, `optional`, defaults to 30):
            The maximum length of an answer that can be generated. This is needed because the start and end
            predictions are not conditioned on one another.
        start_n_top (:obj:`int`, `optional`, defaults to 5):
            The number of top start logits to keep when searching for the :obj:`n_best_size` predictions.
        end_n_top (:obj:`int`, `optional`, defaults to 5):
            The number of top end logits to keep when searching for the :obj:`n_best_size` predictions.
        output_dir (:obj:`str`, `optional`):
            If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if
            :obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null
            answers, are saved in `output_dir`.
        prefix (:obj:`str`, `optional`):
            If provided, the dictionaries mentioned above are saved with `prefix` added to their names.
        log_level (:obj:`int`, `optional`, defaults to ``logging.WARNING``):
            ``logging`` log level (e.g., ``logging.WARNING``)
    """
    if len(predictions) != 5:
        raise ValueError("`predictions` should be a tuple with five elements.")
    start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits = predictions

    if len(predictions[0]) != len(features):
        raise ValueError(f"Got {len(predictions[0])} predictions and {len(features)} features.")

    # Build a map example to its corresponding features.
    example_id_to_index = {k: i for i, k in enumerate(examples["id"])}
    features_per_example = collections.defaultdict(list)
    for i, feature in enumerate(features):
        features_per_example[example_id_to_index[feature["example_id"]]].append(i)

    # The dictionaries we have to fill.
    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    scores_diff_json = collections.OrderedDict() if version_2_with_negative else None

    # Logging.
    logger.setLevel(log_level)
    logger.info(f"Post-processing {len(examples)} example predictions split into {len(features)} features.")

    # Let's loop over all the examples!
    for example_index, example in enumerate(tqdm(examples)):
        # Those are the indices of the features associated to the current example.
        feature_indices = features_per_example[example_index]

        min_null_score = None
        prelim_predictions = []

        # Looping through all the features associated to the current example.
        for feature_index in feature_indices:
            # We grab the predictions of the model for this feature.
            start_log_prob = start_top_log_probs[feature_index]
            start_indexes = start_top_index[feature_index]
            end_log_prob = end_top_log_probs[feature_index]
            end_indexes = end_top_index[feature_index]
            feature_null_score = cls_logits[feature_index]
            # This is what will allow us to map some of the positions in our logits to spans of text in the
            # original context.
            offset_mapping = features[feature_index]["offset_mapping"]
            # Optional `token_is_max_context`, if provided we will remove answers that do not have the maximum
            # context available in the current feature.
            token_is_max_context = features[feature_index].get("token_is_max_context", None)

            # Update minimum null prediction
            if min_null_score is None or feature_null_score < min_null_score:
                min_null_score = feature_null_score

            # Go through all possibilities for the `start_n_top`/`end_n_top` greatest start and end logits.
            for i in range(start_n_top):
                for j in range(end_n_top):
                    start_index = int(start_indexes[i])
                    j_index = i * end_n_top + j
                    end_index = int(end_indexes[j_index])
                    # Don't consider out-of-scope answers (the last part of this test should be unnecessary because
                    # of the p_mask but let's not take any risk)
                    if (
                        start_index >= len(offset_mapping)
                        or end_index >= len(offset_mapping)
                        or offset_mapping[start_index] is None
                        or len(offset_mapping[start_index]) < 2
                        or offset_mapping[end_index] is None
                        or len(offset_mapping[end_index]) < 2
                    ):
                        continue

                    # Don't consider answers with a negative length or a length > max_answer_length.
                    if end_index < start_index or end_index - start_index + 1 > max_answer_length:
                        continue

                    # Don't consider answers that don't have the maximum context available (if such information is
                    # provided).
                    if token_is_max_context is not None and not token_is_max_context.get(str(start_index), False):
                        continue

                    prelim_predictions.append(
                        {
                            "offsets": (offset_mapping[start_index][0], offset_mapping[end_index][1]),
                            "score": start_log_prob[i] + end_log_prob[j_index],
                            "start_log_prob": start_log_prob[i],
                            "end_log_prob": end_log_prob[j_index],
                        }
                    )

        # Only keep the best `n_best_size` predictions.
        predictions = sorted(prelim_predictions, key=lambda x: x["score"], reverse=True)[:n_best_size]

        # Use the offsets to gather the answer text in the original context.
        context = example["context"]
        for pred in predictions:
            offsets = pred.pop("offsets")
            pred["text"] = context[offsets[0] : offsets[1]]

        # In the very rare edge case where we don't have a single non-null prediction, we create a fake prediction
        # to avoid failure.
        if len(predictions) == 0:
            # Without predictions min_null_score is going to be None and None will cause an exception later
            min_null_score = -2e-6
            predictions.insert(0, {"text": "", "start_logit": -1e-6, "end_logit": -1e-6, "score": min_null_score})

        # Compute the softmax of all scores (we do it with numpy to stay independent from torch/tf in this file,
        # using the LogSumExp trick).
        scores = np.array([pred.pop("score") for pred in predictions])
        exp_scores = np.exp(scores - np.max(scores))
        probs = exp_scores / exp_scores.sum()

        # Include the probabilities in our predictions.
        for prob, pred in zip(probs, predictions):
            pred["probability"] = prob

        # Pick the best prediction and set the probability for the null answer.
        all_predictions[example["id"]] = predictions[0]["text"]
        if version_2_with_negative:
            scores_diff_json[example["id"]] = float(min_null_score)

        # Make `predictions` JSON-serializable by casting np.float back to float.
        all_nbest_json[example["id"]] = [
            {k: (float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v) for k, v in pred.items()}
            for pred in predictions
        ]

    # If we have an output_dir, let's save all those dicts.
if output_dir is not None: if not os.path.isdir(output_dir): raise EnvironmentError(f"{output_dir} is not a directory.") prediction_file = os.path.join( output_dir, "predictions.json" if prefix is None else f"{prefix}_predictions.json" ) nbest_file = os.path.join( output_dir, "nbest_predictions.json" if prefix is None else f"{prefix}_nbest_predictions.json" ) if version_2_with_negative: null_odds_file = os.path.join( output_dir, "null_odds.json" if prefix is None else f"{prefix}_null_odds.json" ) logger.info(f"Saving predictions to {prediction_file}.") with open(prediction_file, "w") as writer: writer.write(json.dumps(all_predictions, indent=4) + "\n") logger.info(f"Saving nbest_preds to {nbest_file}.") with open(nbest_file, "w") as writer: writer.write(json.dumps(all_nbest_json, indent=4) + "\n") if version_2_with_negative: logger.info(f"Saving null_odds to {null_odds_file}.") with open(null_odds_file, "w") as writer: writer.write(json.dumps(scores_diff_json, indent=4) + "\n") return all_predictions, scores_diff_json
Post-processes the predictions of a question-answering model with beam search to convert them to answers that are substrings of the original contexts. This is the postprocessing function for models that return start and end logits, indices, as well as cls token predictions. Args: examples: The non-preprocessed dataset (see the main script for more information). features: The processed dataset (see the main script for more information). predictions (:obj:`Tuple[np.ndarray, np.ndarray]`): The predictions of the model: five arrays containing the start top log probabilities, the start top indices, the end top log probabilities, the end top indices and the cls token logits, respectively. The first dimension of each must match the number of elements of :obj:`features`. version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not the underlying dataset contains examples with no answers. n_best_size (:obj:`int`, `optional`, defaults to 20): The total number of n-best predictions to generate when looking for an answer. max_answer_length (:obj:`int`, `optional`, defaults to 30): The maximum length of an answer that can be generated. This is needed because the start and end predictions are not conditioned on one another. start_n_top (:obj:`int`, `optional`, defaults to 5): The number of top start logits to keep when searching for the :obj:`n_best_size` predictions. end_n_top (:obj:`int`, `optional`, defaults to 5): The number of top end logits to keep when searching for the :obj:`n_best_size` predictions. output_dir (:obj:`str`, `optional`): If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if :obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null answers, are saved in `output_dir`. prefix (:obj:`str`, `optional`): If provided, the dictionaries mentioned above are saved with `prefix` added to their names. log_level (:obj:`int`, `optional`, defaults to ``logging.WARNING``): ``logging`` log level (e.g., ``logging.WARNING``)
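A hypothetical calling sketch; `eval_examples`/`eval_features` are assumed to come from SQuAD-style preprocessing, and the five arrays from an XLNet-style QA head:

```python
predictions, scores_diff = postprocess_qa_predictions_with_beam_search(
    examples=eval_examples,
    features=eval_features,
    predictions=(
        start_top_log_probs,   # shape (num_features, start_n_top)
        start_top_index,       # shape (num_features, start_n_top)
        end_top_log_probs,     # shape (num_features, start_n_top * end_n_top)
        end_top_index,         # shape (num_features, start_n_top * end_n_top)
        cls_logits,            # shape (num_features,)
    ),
    version_2_with_negative=True,
    output_dir="eval_outputs",
)
```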
11,686
import csv from collections import defaultdict from dataclasses import dataclass, field from typing import List, Optional import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import ScalarFormatter from transformers import HfArgumentParser def list_field(default=None, metadata=None): return field(default_factory=lambda: default, metadata=metadata)
null
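Typical use is as a mutable-default factory for dataclass fields parsed by `HfArgumentParser` (field names below are illustrative):

```python
@dataclass
class PlotArguments:
    csv_file: str = field(metadata={"help": "The csv file to plot."})
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "Short names to display instead of the full model names."}
    )
```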
11,687
import csv from collections import defaultdict from dataclasses import dataclass, field from typing import List, Optional import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import ScalarFormatter from transformers import HfArgumentParser def can_convert_to_int(string): try: int(string) return True except ValueError: return False
null
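Behavior sketch: only strings that `int()` itself accepts count as convertible.

```python
can_convert_to_int("42")    # True
can_convert_to_int("4.2")   # False -- int("4.2") raises ValueError
can_convert_to_int("1e3")   # False -- scientific notation is not a valid int literal
```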
11,688
import csv from collections import defaultdict from dataclasses import dataclass, field from typing import List, Optional import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import ScalarFormatter from transformers import HfArgumentParser def can_convert_to_float(string): try: float(string) return True except ValueError: return False
null
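Behavior sketch, mirroring the integer variant:

```python
can_convert_to_float("4.2")   # True
can_convert_to_float("1e3")   # True -- float() accepts scientific notation
can_convert_to_float("abc")   # False
```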