code
stringlengths 81
54k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
import numpy
class TwoHiddenLayerNeuralNetwork:
    """A fully-connected network with two hidden layers (4 and 3 nodes) and one
    output node, trained by plain gradient descent with sigmoid activations.

    Fixes: restores the mangled class/attribute names (the obfuscated original
    assigned every attribute to ``lowercase__``, and ``example()`` below
    references this class by the name ``TwoHiddenLayerNeuralNetwork``).
    """

    def __init__(self, input_array: "numpy.ndarray", output_array: "numpy.ndarray") -> None:
        self.input_array = input_array
        # Random initial weights; first argument is the number of nodes in the
        # previous layer, second is the number of nodes in the next layer.
        # input layer -> first hidden layer (4 nodes)
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )
        # first hidden layer (4 nodes) -> second hidden layer (3 nodes)
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # second hidden layer (3 nodes) -> output layer (1 node)
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values; starts as zeroes until feedforward() runs.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> "numpy.ndarray":
        """Propagate the input through both hidden layers and return the output."""
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        """Update all three weight matrices from the last feedforward() pass."""
        # d(loss)/d(output) * sigmoid'(output); hoisted since all three
        # gradients share this factor.
        delta_output = (
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output)
        )
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            delta_output,
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(delta_output, self.second_hidden_layer_and_output_layer_weights.T)
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(delta_output, self.second_hidden_layer_and_output_layer_weights.T)
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )
        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: "numpy.ndarray", iterations: int, give_loss: bool) -> None:
        """Run `iterations` rounds of feedforward + backprop.

        When `give_loss` is true, print the mean-squared-error loss each round.
        """
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: "numpy.ndarray") -> int:
        """Classify `input_arr`: 1 if the network output exceeds 0.6, else 0."""
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value):
    """Return the elementwise sigmoid activation 1 / (1 + e^-value).

    Fixes: the obfuscated original named the parameter ``SCREAMING_SNAKE_CASE_``
    while the body referenced ``value``, and the function must be named
    ``sigmoid`` (called by the network class above).
    """
    return 1 / (1 + numpy.exp(-value))
def sigmoid_derivative(value):
    """Return the derivative of the sigmoid, expressed in terms of its output.

    ``value`` is assumed to already be a sigmoid activation, so the derivative
    is simply value * (1 - value). Fixes the mangled parameter/function names
    (the network class above calls ``sigmoid_derivative``).
    """
    return value * (1 - value)
def example() -> int:
    """Train the two-hidden-layer network on the 3-bit truth table and
    classify the input (1, 1, 1).

    Fixes: ``numpy.floataa`` -> ``numpy.float64``, undefined ``UpperCAmelCase__``
    placeholders, and the function name (the ``__main__`` guard calls
    ``example()``).
    """
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
# Script entry point: run the neural-network demo.
# NOTE(review): in this file the demo function's name was mangled to
# `__lowerCAmelCase`; confirm it is restored to `example` so this call resolves.
if __name__ == "__main__":
    example()
| 713
|
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
# Conversion tables for the tfds -> datasets converter.
# Fixes: the obfuscated original bound every constant to the same name
# ``lowercase_``; the ConvertCommand class below references them as
# HIGHLIGHT_MESSAGE_PRE/POST, TO_HIGHLIGHT and TO_CONVERT.

# Markers wrapped around lines that need manual review after conversion.
HIGHLIGHT_MESSAGE_PRE = """<<<<<<< This should probably be modified because it mentions: """
HIGHLIGHT_MESSAGE_POST = """=======
>>>>>>>
"""

# Substrings whose presence flags a line for manual update.
TO_HIGHLIGHT = [
    "TextEncoderConfig",
    "ByteTextEncoder",
    "SubwordTextEncoder",
    "encoder_config",
    "maybe_build_from_corpus",
    "manual_dir",
]

TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"tfds\.core", r"datasets"),
    (r"tf\.io\.gfile\.GFile", r"open"),
    (r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
    (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    (r"tfds\.features\.FeaturesDict\(", r"dict("),
    (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
    (r"tfds\.", r"datasets."),
    (r"dl_manager\.manual_dir", r"self.config.data_dir"),
    (r"self\.builder_config", r"self.config"),
]
def convert_command_factory(args):
    """Factory used by argparse `set_defaults(func=...)` to build the command.

    Fixes the mangled parameter name (the body referenced ``args`` while the
    parameter was named ``SCREAMING_SNAKE_CASE_``).
    """
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
    """`datasets-cli convert`: convert a TensorFlow Datasets dataset script to a
    HuggingFace Datasets script.

    Fixes: restores the mangled class/method/local names (every local in the
    obfuscated original was bound to ``lowercase__``, which made the code
    non-functional).
    """

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Attach the `convert` sub-command and its arguments to `parser`."""
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory",
            type=str,
            required=True,
            help="Path to the HuggingFace Datasets folder.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        """Convert every eligible .py file under the tfds path."""
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")
        abs_datasets_path = os.path.abspath(self._datasets_directory)
        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []          # converted files that are not builders (moved later)
        with_manual_update = []   # converted files that still need hand-editing
        imports_to_builder_map = {}  # tfds import name -> destination builder dir
        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)
            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue
            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)
                    # Take care of saving utilities (to later move them together with main script)
                    if "tensorflow_datasets" in out_line:
                        match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                        tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                        out_line = "from . import " + match.group(1)
                    # Check we have not forget anything
                    if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                        raise ValueError(f"Error converting {out_line.strip()}")
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)
            if needs_manual_update:
                with_manual_update.append(output_file)
            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
| 37
| 0
|
import datasets
from .evaluate import evaluate
lowercase_ = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
lowercase_ = """
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
lowercase_ = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the CUAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
\'aupr\': Area Under the Precision-Recall curve
\'prec_at_80_recall\': Precision at 80% recall
\'prec_at_90_recall\': Precision at 90% recall
Examples:
>>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
    """CUAD metric wrapper around the official scoring script.

    Fixes: restores the mangled class name and the `datasets.Metric` hook
    names (`_info`/`_compute`), and the undefined ``__UpperCamelCase``
    placeholders in `_compute`.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        """Re-shape predictions/references into the CUAD format and score them."""
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 714
|
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
# Module-level tokenizer constants.
# Fixes: the obfuscated original bound every constant to ``lowercase_``; the
# class attributes below reference VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP
# / PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, and the mask_token property logs
# through ``logger``.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    """Fast (Rust-backed) LED tokenizer.

    Fixes: restores the mangled class/attribute/parameter names (the
    obfuscated original bound every local to ``lowercase__`` and every
    parameter to ``__lowercase``).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        # Keep the backend pre-tokenizer's add_prefix_space in sync with the
        # requested value (round-trip its state through JSON to rebuild it).
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        """The mask token, or None (with a logged error) if it was never set."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Strings are wrapped in an AddedToken that eats the space before the
        # mask (lstrip=True) so `<mask>` behaves like the slow tokenizer.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """<s> A </s> for one sequence, <s> A </s> </s> B </s> for a pair."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """LED (like BART) does not use token type ids: return all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return encoded_inputs
| 37
| 0
|
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Return the set of distinct prime factors of ``n`` by trial division.

    Fixes the mangled parameter name (the body referenced ``n`` and
    ``_UpperCamelCase``) and the function name, which ``upf_len`` below calls.
    """
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    # Whatever remains above 1 is itself prime.
    if n > 1:
        factors.add(n)
    return factors
@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of distinct prime factors of ``num``."""
    return len(unique_prime_factors(num))
def equality(iterable: list) -> bool:
    """Return True if every element of ``iterable`` is equal (or it is empty)."""
    return len(set(iterable)) in (0, 1)
def run(n: int) -> list:
    """Find the first run of ``n`` consecutive integers that each have exactly
    ``n`` distinct prime factors, and return that run.
    """
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1
def solution(n: int = 4) -> int:
    """Return the first of ``n`` consecutive integers with ``n`` distinct
    prime factors each (Project Euler style), or None if the search yields
    an empty group.
    """
    results = run(n)
    return results[0] if len(results) else None
# Script entry point: print the answer for the default n=4 search.
if __name__ == "__main__":
    print(solution())
| 715
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    """Entry point for the `accelerate` CLI: build the parser, register every
    sub-command, parse, and dispatch to the chosen command's handler.

    Fixes: restores the mangled function name (the ``__main__`` guard calls
    ``main()``) and the locals that the obfuscated original bound to
    ``lowercase__``; ``allow_abbrev=False`` avoids argparse prefix-matching
    sub-command options.
    """
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
# Script entry point: dispatch to the CLI.
# NOTE(review): in this file the entry function's name was mangled to
# `__lowerCAmelCase`; confirm it is restored to `main` so this call resolves.
if __name__ == "__main__":
    main()
| 37
| 0
|
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    """Config tester that checks Segformer-specific attributes.

    Fixes: the obfuscated original referenced an undefined ``__lowercase``
    inside the method instead of the freshly-built config, and the class /
    method names were mangled (the model test class below instantiates
    ``SegformerConfigTester``).
    """

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        # Segformer exposes per-stage lists instead of single scalar sizes.
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    """Builds small Segformer configs/inputs and checks model outputs.

    Fixes: restores the mangled class/attribute/method names (every attribute
    in the obfuscated original was bound to ``lowercase__``; the test case at
    the bottom of the file references ``SegformerModelTester`` and these
    method names).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random test tensors."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # The last stage downsamples by downsampling_rates[-1] * 2.
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width),
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        # Binary case: labels in {0} only (randint upper bound 1 is exclusive).
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class _snake_case ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase):
    """Common test suite for SegFormer models (model/pipeline mixins + unittest)."""

    # Model classes exercised by the common tests (empty when torch is absent).
    UpperCamelCase__ : Optional[int] =(
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task -> model-class mapping used by the pipeline tester mixin.
    UpperCamelCase__ : List[Any] =(
        {
            """feature-extraction""": SegformerModel,
            """image-classification""": SegformerForImageClassification,
            """image-segmentation""": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    UpperCamelCase__ : int =True
    UpperCamelCase__ : Optional[Any] =False
    UpperCamelCase__ : Optional[int] =False
    UpperCamelCase__ : Dict =False
    def A__ ( self : Dict ):
        # setUp: build the model tester and the config tester used below.
        lowercase__ = SegformerModelTester(self )
        lowercase__ = SegformerConfigTester(self, config_class=__lowercase )
    def A__ ( self : str ):
        # Run the shared config sanity checks.
        self.config_tester.run_common_tests()
    def A__ ( self : List[str] ):
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowercase )
    def A__ ( self : List[Any] ):
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*__lowercase )
    def A__ ( self : List[str] ):
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*__lowercase )
    @unittest.skip("SegFormer does not use inputs_embeds" )
    def A__ ( self : Optional[int] ):
        pass
    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods" )
    def A__ ( self : Any ):
        pass
    def A__ ( self : Optional[Any] ):
        # The forward signature of every model class must start with `pixel_values`.
        lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase__ = model_class(__lowercase )
            lowercase__ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase__ = [*signature.parameters.keys()]
            lowercase__ = ["pixel_values"]
            self.assertListEqual(arg_names[:1], __lowercase )
    def A__ ( self : str ):
        # Checks attention outputs: count equals sum of depths, and the first/last
        # block attentions have the expected (reduced) sequence lengths.
        lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase__ = True
        for model_class in self.all_model_classes:
            lowercase__ = True
            lowercase__ = False
            lowercase__ = True
            lowercase__ = model_class(__lowercase )
            model.to(__lowercase )
            model.eval()
            with torch.no_grad():
                lowercase__ = model(**self._prepare_for_class(__lowercase, __lowercase ) )
            lowercase__ = outputs.attentions
            lowercase__ = sum(self.model_tester.depths )
            self.assertEqual(len(__lowercase ), __lowercase )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            lowercase__ = True
            lowercase__ = model_class(__lowercase )
            model.to(__lowercase )
            model.eval()
            with torch.no_grad():
                lowercase__ = model(**self._prepare_for_class(__lowercase, __lowercase ) )
            lowercase__ = outputs.attentions
            self.assertEqual(len(__lowercase ), __lowercase )
            # verify the first attentions (first block, first layer)
            lowercase__ = (self.model_tester.image_size // 4) ** 2
            lowercase__ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], )
            # verify the last attentions (last block, last layer)
            lowercase__ = (self.model_tester.image_size // 32) ** 2
            lowercase__ = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:] ), [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len], )
            lowercase__ = len(__lowercase )
            # Check attention is always last and order is fine
            lowercase__ = True
            lowercase__ = True
            lowercase__ = model_class(__lowercase )
            model.to(__lowercase )
            model.eval()
            with torch.no_grad():
                lowercase__ = model(**self._prepare_for_class(__lowercase, __lowercase ) )
            self.assertEqual(out_len + 1, len(__lowercase ) )
            lowercase__ = outputs.attentions
            self.assertEqual(len(__lowercase ), __lowercase )
            # verify the first attentions (first block, first layer)
            lowercase__ = (self.model_tester.image_size // 4) ** 2
            lowercase__ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], )
    def A__ ( self : List[str] ):
        # Checks hidden-state outputs: one per encoder block, first block shaped
        # (hidden_sizes[0], H/4, W/4).
        def check_hidden_states_output(__lowercase : Dict, __lowercase : Optional[Any], __lowercase : Union[str, Any] ):
            lowercase__ = model_class(__lowercase )
            model.to(__lowercase )
            model.eval()
            with torch.no_grad():
                lowercase__ = model(**self._prepare_for_class(__lowercase, __lowercase ) )
            lowercase__ = outputs.hidden_states
            lowercase__ = self.model_tester.num_encoder_blocks
            self.assertEqual(len(__lowercase ), __lowercase )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ), [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ], )
        lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase__ = True
            check_hidden_states_output(__lowercase, __lowercase, __lowercase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowercase__ = True
            check_hidden_states_output(__lowercase, __lowercase, __lowercase )
    def A__ ( self : Union[str, Any] ):
        # Smoke-test training: forward with labels then backward must not raise.
        if not self.model_tester.is_training:
            return
        lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase__ = True
        for model_class in self.all_model_classes:
            if model_class in get_values(__lowercase ):
                continue
            lowercase__ = model_class(__lowercase )
            model.to(__lowercase )
            model.train()
            lowercase__ = self._prepare_for_class(__lowercase, __lowercase, return_labels=__lowercase )
            lowercase__ = model(**__lowercase ).loss
            loss.backward()
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def A__ ( self : List[Any] ):
        pass
    @slow
    def A__ ( self : Optional[int] ):
        # Smoke-test loading one pretrained checkpoint from the archive list.
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase__ = SegformerModel.from_pretrained(__lowercase )
            self.assertIsNotNone(__lowercase )
def __lowerCAmelCase ( ):
    """Load the COCO cats-and-remotes fixture image used by the slow integration tests."""
    # Bug fix: the loaded image was bound to a throwaway local while the function
    # returned the undefined name `image`, raising NameError on every call.
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
class _snake_case ( unittest.TestCase):
    """Slow integration tests for SegFormer semantic segmentation on real checkpoints."""

    @slow
    def A__ ( self : Tuple ):
        # ADE20k b0 checkpoint: verify logits shape and a 3x3x3 logits slice.
        lowercase__ = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=__lowercase, align=__lowercase, do_random_crop=__lowercase )
        lowercase__ = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
            __lowercase )
        lowercase__ = prepare_img()
        lowercase__ = image_processor(images=__lowercase, return_tensors="pt" )
        lowercase__ = encoded_inputs.pixel_values.to(__lowercase )
        with torch.no_grad():
            lowercase__ = model(__lowercase )
        lowercase__ = torch.Size((1, model.config.num_labels, 128, 128) )
        self.assertEqual(outputs.logits.shape, __lowercase )
        # Reference values recorded from the released checkpoint.
        lowercase__ = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ] ).to(__lowercase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], __lowercase, atol=1e-4 ) )
    @slow
    def A__ ( self : List[Any] ):
        # Cityscapes b1 checkpoint: same checks with a looser tolerance.
        lowercase__ = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=__lowercase, align=__lowercase, do_random_crop=__lowercase )
        lowercase__ = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024" ).to(__lowercase )
        lowercase__ = prepare_img()
        lowercase__ = image_processor(images=__lowercase, return_tensors="pt" )
        lowercase__ = encoded_inputs.pixel_values.to(__lowercase )
        with torch.no_grad():
            lowercase__ = model(__lowercase )
        lowercase__ = torch.Size((1, model.config.num_labels, 128, 128) )
        self.assertEqual(outputs.logits.shape, __lowercase )
        lowercase__ = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ] ).to(__lowercase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], __lowercase, atol=1e-1 ) )
    @slow
    def A__ ( self : List[Any] ):
        # Post-processing: semantic maps follow target_sizes when given, else logits size.
        lowercase__ = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=__lowercase, align=__lowercase, do_random_crop=__lowercase )
        lowercase__ = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
            __lowercase )
        lowercase__ = prepare_img()
        lowercase__ = image_processor(images=__lowercase, return_tensors="pt" )
        lowercase__ = encoded_inputs.pixel_values.to(__lowercase )
        with torch.no_grad():
            lowercase__ = model(__lowercase )
        lowercase__ = outputs.logits.detach().cpu()
        lowercase__ = image_processor.post_process_semantic_segmentation(outputs=__lowercase, target_sizes=[(500, 300)] )
        lowercase__ = torch.Size((500, 300) )
        self.assertEqual(segmentation[0].shape, __lowercase )
        lowercase__ = image_processor.post_process_semantic_segmentation(outputs=__lowercase )
        lowercase__ = torch.Size((128, 128) )
        self.assertEqual(segmentation[0].shape, __lowercase )
| 716
|
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class _snake_case ( unittest.TestCase):
def __init__( self : Dict, __lowercase : int, __lowercase : Union[str, Any]=7, __lowercase : Union[str, Any]=3, __lowercase : Any=18, __lowercase : Union[str, Any]=30, __lowercase : Any=400, __lowercase : List[str]=True, __lowercase : Dict=None, __lowercase : List[str]=True, __lowercase : int=False, __lowercase : Union[str, Any]=True, __lowercase : str=True, __lowercase : Optional[int]=[0.5, 0.5, 0.5], __lowercase : List[Any]=[0.5, 0.5, 0.5], ):
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size if size is not None else {"height": 18, "width": 20}
lowercase__ = do_thumbnail
lowercase__ = do_align_axis
lowercase__ = do_pad
lowercase__ = do_normalize
lowercase__ = image_mean
lowercase__ = image_std
def A__ ( self : Optional[Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class _snake_case ( lowercase__ , unittest.TestCase):
    """Test suite for DonutImageProcessor: properties, size parsing, and batch
    encoding of PIL / numpy / torch inputs."""

    UpperCamelCase__ : Optional[int] =DonutImageProcessor if is_vision_available() else None
    def A__ ( self : str ):
        # setUp: build the hyper-parameter tester.
        lowercase__ = DonutImageProcessingTester(self )
    @property
    def A__ ( self : List[str] ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def A__ ( self : Optional[Any] ):
        # The image processor exposes all expected configuration attributes.
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__lowercase, "do_resize" ) )
        self.assertTrue(hasattr(__lowercase, "size" ) )
        self.assertTrue(hasattr(__lowercase, "do_thumbnail" ) )
        self.assertTrue(hasattr(__lowercase, "do_align_long_axis" ) )
        self.assertTrue(hasattr(__lowercase, "do_pad" ) )
        self.assertTrue(hasattr(__lowercase, "do_normalize" ) )
        self.assertTrue(hasattr(__lowercase, "image_mean" ) )
        self.assertTrue(hasattr(__lowercase, "image_std" ) )
    def A__ ( self : str ):
        # `size` accepts dict, int, and legacy (width, height) tuple forms.
        lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size, {"height": 18, "width": 20} )
        lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=42 )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42} )
        # Previous config had dimensions in (width, height) order
        lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84) )
        self.assertEqual(image_processor.size, {"height": 84, "width": 42} )
    def A__ ( self : List[str] ):
        pass
    @is_flaky()
    def A__ ( self : Dict ):
        # Initialize image_processing
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, Image.Image )
        # Test not batched input
        lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
        # Test batched
        lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
    @is_flaky()
    def A__ ( self : Optional[Any] ):
        # Initialize image_processing
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, numpify=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, np.ndarray )
        # Test not batched input
        lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
        # Test batched
        lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
    @is_flaky()
    def A__ ( self : Tuple ):
        # Initialize image_processing
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, torchify=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, torch.Tensor )
        # Test not batched input
        lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
        # Test batched
        lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
| 37
| 0
|
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
# Maps each supported ERNIE-M checkpoint name to its hosted config.json.
lowercase_ = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}
class _snake_case ( __lowerCAmelCase):
    """Configuration class for ERNIE-M models.

    Bug fixes: the original __init__ declared every parameter as the same name
    ``__lowercase`` (duplicate argument names — a SyntaxError), forwarded the
    undefined name ``lowerCamelCase__`` to ``super().__init__`` and rebound a
    throwaway local instead of storing the hyper-parameters on the config.
    Defaults follow the susnato/ernie-m-base_pytorch checkpoint.
    """

    UpperCamelCase__ : Any ="ernie_m"
    UpperCamelCase__ : Dict[str, str] ={"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 25_0002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-0_5,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        # Forward the pad token and any remaining kwargs to PretrainedConfig.
        super().__init__(pad_token_id=pad_token_id, **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
| 717
|
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _snake_case ( lowercase__):
    """Static checks over ./datasets/**/*.py: every open() must pass an explicit
    encoding (or a binary mode), and bare print() calls are forbidden."""

    def A__ ( self : Optional[Any], __lowercase : str ):
        # Return a regex match when the file contains open(...) without an
        # explicit encoding/binary-mode keyword; None otherwise.
        with open(__lowercase, encoding="utf-8" ) as input_file:
            lowercase__ = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
            lowercase__ = input_file.read()
            lowercase__ = regexp.search(__lowercase )
            return match
    def A__ ( self : str, __lowercase : str ):
        # Return the first real print( call in the file, ignoring those inside
        # comments, string literals and docstrings; None when there is none.
        with open(__lowercase, encoding="utf-8" ) as input_file:
            lowercase__ = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL )
            lowercase__ = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            lowercase__ = regexp.finditer(__lowercase )
            lowercase__ = [match for match in matches if match is not None and match.group(1 ) is not None]
            return matches[0] if matches else None
    def A__ ( self : Union[str, Any] ):
        # Fail if any dataset script opens a file without specifying utf-8.
        lowercase__ = Path("./datasets" )
        lowercase__ = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(__lowercase ) ):
                raise AssertionError(F'''open(...) must use utf-8 encoding in {dataset}''' )
    def A__ ( self : Union[str, Any] ):
        # Fail if any dataset script uses print() instead of the datasets logger.
        lowercase__ = Path("./datasets" )
        lowercase__ = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(__lowercase ) ):
                raise AssertionError(F'''print statement found in {dataset}. Use datasets.logger/logging instead.''' )
| 37
| 0
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
    """Builds small YOLOS configs, inputs and labels for the model tests."""

    def __init__( self : List[Any], __lowercase : List[Any], __lowercase : Optional[Any]=13, __lowercase : Dict=[30, 30], __lowercase : int=2, __lowercase : Optional[int]=3, __lowercase : int=True, __lowercase : List[str]=True, __lowercase : Dict=32, __lowercase : List[str]=5, __lowercase : Union[str, Any]=4, __lowercase : Union[str, Any]=37, __lowercase : Any="gelu", __lowercase : int=0.1, __lowercase : List[str]=0.1, __lowercase : int=10, __lowercase : Tuple=0.02, __lowercase : Dict=3, __lowercase : Dict=None, __lowercase : List[Any]=8, __lowercase : Optional[Any]=10, ):
        lowercase__ = parent
        lowercase__ = batch_size
        lowercase__ = image_size
        lowercase__ = patch_size
        lowercase__ = num_channels
        lowercase__ = is_training
        lowercase__ = use_labels
        lowercase__ = hidden_size
        lowercase__ = num_hidden_layers
        lowercase__ = num_attention_heads
        lowercase__ = intermediate_size
        lowercase__ = hidden_act
        lowercase__ = hidden_dropout_prob
        lowercase__ = attention_probs_dropout_prob
        lowercase__ = type_sequence_label_size
        lowercase__ = initializer_range
        lowercase__ = num_labels
        lowercase__ = scope
        lowercase__ = n_targets
        lowercase__ = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        lowercase__ = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        lowercase__ = num_patches + 1 + self.num_detection_tokens
    def A__ ( self : Tuple ):
        # Build random pixel values plus (optionally) one labels dict per image.
        lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
        lowercase__ = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            lowercase__ = []
            for i in range(self.batch_size ):
                lowercase__ = {}
                lowercase__ = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=__lowercase )
                lowercase__ = torch.rand(self.n_targets, 4, device=__lowercase )
                labels.append(__lowercase )
        lowercase__ = self.get_config()
        return config, pixel_values, labels
    def A__ ( self : List[str] ):
        # Tiny YolosConfig built from the tester's hyper-parameters.
        return YolosConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__lowercase, initializer_range=self.initializer_range, num_detection_tokens=self.num_detection_tokens, num_labels=self.num_labels, )
    def A__ ( self : int, __lowercase : Union[str, Any], __lowercase : str, __lowercase : List[str] ):
        # Base model: last hidden state must be (batch, expected_seq_len, hidden).
        lowercase__ = YolosModel(config=__lowercase )
        model.to(__lowercase )
        model.eval()
        lowercase__ = model(__lowercase )
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size) )
    def A__ ( self : Any, __lowercase : Tuple, __lowercase : int, __lowercase : List[str] ):
        # Detection head: verify logits/pred_boxes shapes with and without labels.
        lowercase__ = YolosForObjectDetection(__lowercase )
        model.to(__lowercase )
        model.eval()
        lowercase__ = model(pixel_values=__lowercase )
        lowercase__ = model(__lowercase )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4) )
        lowercase__ = model(pixel_values=__lowercase, labels=__lowercase )
        self.parent.assertEqual(result.loss.shape, () )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4) )
    def A__ ( self : Optional[int] ):
        # Split prepared inputs into (config, inputs_dict) for the common tests.
        lowercase__ = self.prepare_config_and_inputs()
        lowercase__ = config_and_inputs
        lowercase__ = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class _snake_case ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
    """Common test suite for YOLOS models (model/pipeline mixins + unittest)."""

    UpperCamelCase__ : Any =(YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    # Pipeline-task -> model-class mapping used by the pipeline tester mixin.
    UpperCamelCase__ : Tuple =(
        {"""feature-extraction""": YolosModel, """object-detection""": YolosForObjectDetection} if is_torch_available() else {}
    )
    UpperCamelCase__ : Any =False
    UpperCamelCase__ : Any =False
    UpperCamelCase__ : Any =False
    UpperCamelCase__ : int =False
    def A__ ( self : Tuple, __lowercase : Dict, __lowercase : Tuple, __lowercase : Optional[Any]=False ):
        # Extends the common input preparation with per-image detection labels
        # (class_labels + boxes) when return_labels is requested.
        lowercase__ = super()._prepare_for_class(__lowercase, __lowercase, return_labels=__lowercase )
        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                lowercase__ = []
                for i in range(self.model_tester.batch_size ):
                    lowercase__ = {}
                    lowercase__ = torch.ones(
                        size=(self.model_tester.n_targets,), device=__lowercase, dtype=torch.long )
                    lowercase__ = torch.ones(
                        self.model_tester.n_targets, 4, device=__lowercase, dtype=torch.float )
                    labels.append(__lowercase )
                lowercase__ = labels
        return inputs_dict
    def A__ ( self : str ):
        # setUp: build the model tester and config tester.
        lowercase__ = YolosModelTester(self )
        lowercase__ = ConfigTester(self, config_class=__lowercase, has_text_modality=__lowercase, hidden_size=37 )
    def A__ ( self : Tuple ):
        self.config_tester.run_common_tests()
    def A__ ( self : Any ):
        # YOLOS does not use inputs_embeds
        pass
    def A__ ( self : Optional[Any] ):
        # Input embeddings exist; output embeddings are None or a Linear layer.
        lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase__ = model_class(__lowercase )
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
            lowercase__ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(__lowercase, nn.Linear ) )
    def A__ ( self : List[Any] ):
        # The forward signature must start with `pixel_values`.
        lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase__ = model_class(__lowercase )
            lowercase__ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase__ = [*signature.parameters.keys()]
            lowercase__ = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], __lowercase )
    def A__ ( self : Tuple ):
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowercase )
    def A__ ( self : Optional[Any] ):
        # Checks attention outputs: one per hidden layer with YOLOS' longer
        # sequence length (patches + [CLS] + detection tokens).
        lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase__ = True
        # in YOLOS, the seq_len is different
        lowercase__ = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            lowercase__ = True
            lowercase__ = False
            lowercase__ = True
            lowercase__ = model_class(__lowercase )
            model.to(__lowercase )
            model.eval()
            with torch.no_grad():
                lowercase__ = model(**self._prepare_for_class(__lowercase, __lowercase ) )
            lowercase__ = outputs.attentions
            self.assertEqual(len(__lowercase ), self.model_tester.num_hidden_layers )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            lowercase__ = True
            lowercase__ = model_class(__lowercase )
            model.to(__lowercase )
            model.eval()
            with torch.no_grad():
                lowercase__ = model(**self._prepare_for_class(__lowercase, __lowercase ) )
            lowercase__ = outputs.attentions
            self.assertEqual(len(__lowercase ), self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len, seq_len], )
            lowercase__ = len(__lowercase )
            # Check attention is always last and order is fine
            lowercase__ = True
            lowercase__ = True
            lowercase__ = model_class(__lowercase )
            model.to(__lowercase )
            model.eval()
            with torch.no_grad():
                lowercase__ = model(**self._prepare_for_class(__lowercase, __lowercase ) )
            lowercase__ = 1
            self.assertEqual(out_len + added_hidden_states, len(__lowercase ) )
            lowercase__ = outputs.attentions
            self.assertEqual(len(__lowercase ), self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len, seq_len], )
    def A__ ( self : Optional[Any] ):
        # Checks hidden-state outputs: num_hidden_layers + 1 states, each with
        # YOLOS' expected sequence length.
        def check_hidden_states_output(__lowercase : List[Any], __lowercase : List[str], __lowercase : List[str] ):
            lowercase__ = model_class(__lowercase )
            model.to(__lowercase )
            model.eval()
            with torch.no_grad():
                lowercase__ = model(**self._prepare_for_class(__lowercase, __lowercase ) )
            lowercase__ = outputs.hidden_states
            lowercase__ = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 )
            self.assertEqual(len(__lowercase ), __lowercase )
            # YOLOS has a different seq_length
            lowercase__ = self.model_tester.expected_seq_len
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ), [seq_length, self.model_tester.hidden_size], )
        lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase__ = True
            check_hidden_states_output(__lowercase, __lowercase, __lowercase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowercase__ = True
            check_hidden_states_output(__lowercase, __lowercase, __lowercase )
    def A__ ( self : List[Any] ):
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*__lowercase )
    @slow
    def A__ ( self : Any ):
        # Smoke-test loading one pretrained checkpoint from the archive list.
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase__ = YolosModel.from_pretrained(__lowercase )
            self.assertIsNotNone(__lowercase )
def __lowerCAmelCase ( ):
    """Load the COCO cats-and-remotes fixture image used by the slow integration tests."""
    # Bug fix: the loaded image was bound to a throwaway local while the function
    # returned the undefined name `image`, raising NameError on every call.
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class _snake_case ( unittest.TestCase):
    """Slow integration test for hustvl/yolos-small object detection."""

    @cached_property
    def A__ ( self : List[str] ):
        # Image processor for the checkpoint (None when vision deps are missing).
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small" ) if is_vision_available() else None
    @slow
    def A__ ( self : int ):
        lowercase__ = YolosForObjectDetection.from_pretrained("hustvl/yolos-small" ).to(__lowercase )
        lowercase__ = self.default_image_processor
        lowercase__ = prepare_img()
        lowercase__ = image_processor(images=__lowercase, return_tensors="pt" ).to(__lowercase )
        # forward pass
        with torch.no_grad():
            lowercase__ = model(inputs.pixel_values )
        # verify outputs
        lowercase__ = torch.Size((1, 100, 92) )
        self.assertEqual(outputs.logits.shape, __lowercase )
        # Reference slices recorded from the released checkpoint.
        lowercase__ = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]], device=__lowercase, )
        lowercase__ = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=__lowercase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], __lowercase, atol=1e-4 ) )
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], __lowercase, atol=1e-4 ) )
        # verify postprocessing
        lowercase__ = image_processor.post_process_object_detection(
            __lowercase, threshold=0.3, target_sizes=[image.size[::-1]] )[0]
        lowercase__ = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861] ).to(__lowercase )
        lowercase__ = [75, 75, 17, 63, 17]
        lowercase__ = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495] ).to(__lowercase )
        self.assertEqual(len(results["scores"] ), 5 )
        self.assertTrue(torch.allclose(results["scores"], __lowercase, atol=1e-4 ) )
        self.assertSequenceEqual(results["labels"].tolist(), __lowercase )
        self.assertTrue(torch.allclose(results["boxes"][0, :], __lowercase ) )
| 718
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy-import structure: submodule name -> public names it exports.
# Bug fixes vs. the previous revision: the structure dict was bound to a
# different name than the one passed to _LazyModule (NameError at import time),
# the torch branch replaced the dict instead of adding the modeling entry, and
# the lazy module was never installed into sys.modules.
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is unavailable: expose only the configuration objects.
    pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so the heavy imports
    # only happen when a name is first accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 0
|
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
# Bug fix: the message constant was assigned to a throwaway name while the
# comprehension (and the watermarker class below) referenced WATERMARK_MESSAGE /
# WATERMARK_BITS, which were never defined.
WATERMARK_MESSAGE = 0B101100111110110010010000011110111011000110011110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class _snake_case :
    """Embeds an invisible DWT-DCT watermark into generated image batches."""

    def __init__( self : List[str] ):
        # Bit pattern embedded into every image.
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark )
    def A__ ( self : Optional[int], images : Any ):
        """Watermark a (b, c, h, w) batch in [-1, 1] and return it in the same range.

        Bug fixes: only the width was compared against 256 even though the
        encoder needs both spatial dims to be at least 256 (upstream diffusers
        fix), and every intermediate result was discarded into a throwaway
        local so the input was returned unwatermarked.
        """
        # can't encode images that are smaller than 256 in either spatial dimension
        if images.shape[-1] < 256 or images.shape[-2] < 256:
            return images
        # [-1, 1] -> [0, 255] and NCHW -> NHWC for the cv2-based encoder
        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1 ).float().numpy()
        images = [self.encoder.encode(image, "dwtDct" ) for image in images]
        # back to a NCHW tensor in [-1, 1]
        images = torch.from_numpy(np.array(images ) ).permute(0, 3, 1, 2 )
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0 )
        return images
| 719
|
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowercase_ = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class _snake_case ( unittest.TestCase):
    """Fixture/config holder for the Pix2Struct image-processor tests.

    NOTE(review): machine-mangled — ``__init__`` declares ten parameters all
    named ``__lowercase`` (a SyntaxError), every assignment goes to the
    throwaway ``lowercase__`` while descriptive names (``size``, ``parent`` …)
    are read unbound, and both methods share the name ``A__`` so the second
    shadows the first. Restore from the original test file before running.
    """

    def __init__( self : List[Any], __lowercase : int, __lowercase : Optional[int]=7, __lowercase : List[str]=3, __lowercase : Tuple=18, __lowercase : List[Any]=30, __lowercase : Tuple=400, __lowercase : Any=None, __lowercase : Optional[int]=True, __lowercase : List[str]=True, __lowercase : Union[str, Any]=None, ):
        # Default processing size is 20x20 unless overridden.
        lowercase__ = size if size is not None else {"height": 20, "width": 20}
        lowercase__ = parent
        lowercase__ = batch_size
        lowercase__ = num_channels
        lowercase__ = image_size
        lowercase__ = min_resolution
        lowercase__ = max_resolution
        lowercase__ = size
        lowercase__ = do_normalize
        lowercase__ = do_convert_rgb
        # Patch budgets exercised by the shape tests below.
        lowercase__ = [512, 1024, 2048, 4096]
        lowercase__ = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def A__ ( self : List[str] ):
        # Keyword arguments passed to the image-processor constructor.
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def A__ ( self : Any ):
        # Downloads a sample photo and returns it as an RGB PIL image.
        lowercase__ = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        lowercase__ = Image.open(requests.get(__lowercase, stream=__lowercase ).raw ).convert("RGB" )
        return raw_image
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class _snake_case ( lowercase__ , unittest.TestCase):
    """Tests for the 3-channel Pix2Struct image processor.

    NOTE(review): machine-mangled — results are bound to the throwaway
    ``lowercase__`` and read back via undefined names (``encoded_images`` …),
    the base class ``lowercase__`` is undefined at module scope (presumably the
    ``ImageProcessingSavingTestMixin`` imported above), and all methods share
    the name ``A__`` so only the last survives on the class. Restore before use.
    """

    UpperCamelCase__ : Any =PixaStructImageProcessor if is_vision_available() else None

    def A__ ( self : Any ):
        # Shared tester fixture providing batch/size/patch parameters.
        lowercase__ = PixaStructImageProcessingTester(self )

    @property
    def A__ ( self : Union[str, Any] ):
        return self.image_processor_tester.prepare_image_processor_dict()

    def A__ ( self : Optional[Any] ):
        # The processor must expose its normalization/RGB-conversion switches.
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__lowercase, "do_normalize" ) )
        self.assertTrue(hasattr(__lowercase, "do_convert_rgb" ) )

    def A__ ( self : Optional[int] ):
        # Smoke test on a real photo: mean of flattened patches matches a
        # reference value within loose tolerance.
        lowercase__ = self.image_processor_tester.prepare_dummy_image()
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        lowercase__ = 2048
        lowercase__ = image_processor(__lowercase, return_tensors="pt", max_patches=__lowercase )
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606 ), atol=1e-3, rtol=1e-3 ) )

    def A__ ( self : Union[str, Any] ):
        # PIL inputs: check flattened-patch shapes for single image and batch.
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, Image.Image )
        # Per-patch feature size: patch pixels * channels, plus 2 position slots.
        lowercase__ = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            lowercase__ = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim), )
            # Test batched
            lowercase__ = image_processor(
                __lowercase, return_tensors="pt", max_patches=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )

    def A__ ( self : int ):
        # VQA-style flow: calling without `header_text` must raise; with it,
        # shapes match the plain path.
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, Image.Image )
        # Test not batched input
        lowercase__ = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        lowercase__ = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(__lowercase ):
                lowercase__ = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches
            lowercase__ = "Hello"
            lowercase__ = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=__lowercase, header_text=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim), )
            # Test batched
            lowercase__ = image_processor(
                __lowercase, return_tensors="pt", max_patches=__lowercase, header_text=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )

    def A__ ( self : Tuple ):
        # numpy inputs: same shape checks as the PIL path.
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, numpify=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, np.ndarray )
        lowercase__ = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            lowercase__ = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim), )
            # Test batched
            lowercase__ = image_processor(
                __lowercase, return_tensors="pt", max_patches=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )

    def A__ ( self : Any ):
        # torch tensor inputs: same shape checks as the PIL path.
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, torchify=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, torch.Tensor )
        # Test not batched input
        lowercase__ = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            lowercase__ = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim), )
            # Test batched
            lowercase__ = image_processor(
                __lowercase, return_tensors="pt", max_patches=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class _snake_case ( lowercase__ , unittest.TestCase):
    """Tests the Pix2Struct image processor with 4-channel (RGBA) input.

    NOTE(review): same mangling caveats as the 3-channel test class above —
    throwaway ``lowercase__`` bindings, undefined base class ``lowercase__``,
    and shadowed ``A__`` method names.
    """

    UpperCamelCase__ : Optional[int] =PixaStructImageProcessor if is_vision_available() else None

    def A__ ( self : Optional[int] ):
        # 4-channel tester; after RGB conversion only 3 channels remain.
        lowercase__ = PixaStructImageProcessingTester(self, num_channels=4 )
        lowercase__ = 3

    @property
    def A__ ( self : Union[str, Any] ):
        return self.image_processor_tester.prepare_image_processor_dict()

    def A__ ( self : Dict ):
        # The processor must expose its normalization/RGB-conversion switches.
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__lowercase, "do_normalize" ) )
        self.assertTrue(hasattr(__lowercase, "do_convert_rgb" ) )

    def A__ ( self : Union[str, Any] ):
        # RGBA PIL inputs are converted to RGB, so the hidden dim uses
        # (num_channels - 1) channels.
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, Image.Image )
        # Test not batched input
        lowercase__ = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            lowercase__ = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim), )
            # Test batched
            lowercase__ = image_processor(
                __lowercase, return_tensors="pt", max_patches=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
| 37
| 0
|
'''simple docstring'''
from __future__ import annotations
import pandas as pd
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase__ = [0] * no_of_processes
lowercase__ = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(lowerCamelCase__ ):
lowercase__ = burst_time[i]
lowercase__ = 0
lowercase__ = 0
lowercase__ = 9_9999_9999
lowercase__ = 0
lowercase__ = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(lowerCamelCase__ ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
lowercase__ = remaining_time[j]
lowercase__ = j
lowercase__ = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
lowercase__ = remaining_time[short]
if minm == 0:
lowercase__ = 9_9999_9999
if remaining_time[short] == 0:
complete += 1
lowercase__ = False
# Find finish time of current process
lowercase__ = increment_time + 1
# Calculate waiting time
lowercase__ = finish_time - arrival_time[short]
lowercase__ = finar - burst_time[short]
if waiting_time[short] < 0:
lowercase__ = 0
# Increment time
increment_time += 1
return waiting_time
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase__ = [0] * no_of_processes
for i in range(lowerCamelCase__ ):
lowercase__ = burst_time[i] + waiting_time[i]
return turn_around_time
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase__ = 0
lowercase__ = 0
for i in range(lowerCamelCase__ ):
lowercase__ = total_waiting_time + waiting_time[i]
lowercase__ = total_turn_around_time + turn_around_time[i]
print(f'''Average waiting time = {total_waiting_time / no_of_processes:.5f}''' )
print("Average turn around time =" , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
    # Interactive driver: reads arrival/burst times, runs preemptive SJF, and
    # prints per-process stats as a pandas DataFrame.
    # Defects fixed: every intermediate was bound to the throwaway `lowercase_`
    # and the DataFrame was later read via the undefined name `fcfs`.
    # NOTE(review): the scheduling helpers are defined above under the mangled
    # name `__lowerCAmelCase`; the descriptive call names below document intent
    # and match the original source file.
    print("""Enter how many process you want to analyze""")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))
    for i in range(no_of_processes):
        print("""Enter the arrival time and burst time for process:--""" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)
    calculate_average_times(waiting_time, turn_around_time, no_of_processes)
    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            """Process""",
            """BurstTime""",
            """ArrivalTime""",
            """WaitingTime""",
            """TurnAroundTime""",
        ],
    )
    # Printing the dataFrame
    pd.set_option("""display.max_rows""", fcfs.shape[0] + 1)
    print(fcfs)
| 720
|
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase__ , lowercase__ = len(SCREAMING_SNAKE_CASE_ ), len(grid[0] )
if (
min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
lowercase__ = 0
count += depth_first_search(SCREAMING_SNAKE_CASE_ , row + 1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
count += depth_first_search(SCREAMING_SNAKE_CASE_ , row - 1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
count += depth_first_search(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , col + 1 , SCREAMING_SNAKE_CASE_ )
count += depth_first_search(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , col - 1 , SCREAMING_SNAKE_CASE_ )
visit.remove((row, col) )
return count
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 37
| 0
|
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
    """Builds RegNet configs and dummy inputs for the model tests.

    NOTE(review): machine-mangled — ``__init__`` repeats the parameter name
    ``__lowercase`` (a SyntaxError), results are bound to the throwaway
    ``lowercase__`` and read back via unbound names, and the undefined
    ``_SCREAMING_SNAKE_CASE`` is referenced throughout. Restore before use.
    """

    def __init__( self : Any, __lowercase : Tuple, __lowercase : Optional[Any]=3, __lowercase : Dict=32, __lowercase : Optional[int]=3, __lowercase : Tuple=10, __lowercase : str=[10, 20, 30, 40], __lowercase : Tuple=[1, 1, 2, 1], __lowercase : Optional[Any]=True, __lowercase : Optional[Any]=True, __lowercase : Any="relu", __lowercase : Optional[Any]=3, __lowercase : Dict=None, ):
        lowercase__ = parent
        lowercase__ = batch_size
        lowercase__ = image_size
        lowercase__ = num_channels
        lowercase__ = embeddings_size
        lowercase__ = hidden_sizes
        lowercase__ = depths
        lowercase__ = is_training
        lowercase__ = use_labels
        lowercase__ = hidden_act
        lowercase__ = num_labels
        lowercase__ = scope
        lowercase__ = len(_SCREAMING_SNAKE_CASE )

    def A__ ( self : str ):
        # Random pixel values plus optional labels, along with a config.
        lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowercase__ = None
        if self.use_labels:
            lowercase__ = ids_tensor([self.batch_size], self.num_labels )
        lowercase__ = self.get_config()
        return config, pixel_values, labels

    def A__ ( self : Optional[Any] ):
        # Config mirroring the tester's hyperparameters.
        return RegNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, )

    def A__ ( self : List[str], __lowercase : Optional[int], __lowercase : Dict, __lowercase : int ):
        # Base model: last hidden state must be (B, C, H // 32, W // 32).
        lowercase__ = RegNetModel(config=_SCREAMING_SNAKE_CASE )
        model.to(_SCREAMING_SNAKE_CASE )
        model.eval()
        lowercase__ = model(_SCREAMING_SNAKE_CASE )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def A__ ( self : Optional[Any], __lowercase : List[str], __lowercase : Tuple, __lowercase : Dict ):
        # Classification head: logits must be (B, num_labels).
        lowercase__ = self.num_labels
        lowercase__ = RegNetForImageClassification(_SCREAMING_SNAKE_CASE )
        model.to(_SCREAMING_SNAKE_CASE )
        model.eval()
        lowercase__ = model(_SCREAMING_SNAKE_CASE, labels=_SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )

    def A__ ( self : Union[str, Any] ):
        # Split prepared inputs into (config, inputs_dict) for the common tests.
        lowercase__ = self.prepare_config_and_inputs()
        lowercase__ , lowercase__ , lowercase__ = config_and_inputs
        lowercase__ = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class _snake_case ( snake_case__ , snake_case__ , unittest.TestCase):
    """Common ModelTester/PipelineTester suite for RegNet.

    NOTE(review): machine-mangled — the base classes ``snake_case__`` are
    undefined (presumably ModelTesterMixin and PipelineTesterMixin imported
    above), results are bound to the throwaway ``lowercase__``, and the
    undefined ``_SCREAMING_SNAKE_CASE`` is referenced throughout. All methods
    share the name ``A__`` so only the last survives on the class.
    """

    UpperCamelCase__ : Any =(RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    UpperCamelCase__ : Optional[Any] =(
        {"""feature-extraction""": RegNetModel, """image-classification""": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )
    # Feature switches consumed by the shared tester mixins.
    UpperCamelCase__ : int =False
    UpperCamelCase__ : Union[str, Any] =False
    UpperCamelCase__ : str =False
    UpperCamelCase__ : Union[str, Any] =False

    def A__ ( self : Optional[Any] ):
        # Shared model/config testers for the suite.
        lowercase__ = RegNetModelTester(self )
        lowercase__ = ConfigTester(self, config_class=_SCREAMING_SNAKE_CASE, has_text_modality=_SCREAMING_SNAKE_CASE )

    def A__ ( self : Optional[int] ):
        # Standard config round-trip / init checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def A__ ( self : Dict ):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds" )
    def A__ ( self : Optional[Any] ):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings" )
    def A__ ( self : Optional[Any] ):
        pass

    def A__ ( self : List[str] ):
        # forward() must accept pixel_values as its first argument.
        lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase__ = model_class(_SCREAMING_SNAKE_CASE )
            lowercase__ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase__ = [*signature.parameters.keys()]
            lowercase__ = ["pixel_values"]
            self.assertListEqual(arg_names[:1], _SCREAMING_SNAKE_CASE )

    def A__ ( self : Optional[Any] ):
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )

    def A__ ( self : Tuple ):
        # Norm layers should initialize with weight 1 and bias 0.
        lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase__ = model_class(config=_SCREAMING_SNAKE_CASE )
            for name, module in model.named_modules():
                if isinstance(_SCREAMING_SNAKE_CASE, (nn.BatchNormad, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ), msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
                    self.assertTrue(
                        torch.all(module.bias == 0 ), msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )

    def A__ ( self : int ):
        # hidden_states must contain one entry per stage (+ stem), with the
        # first stage at spatial size image_size // 2.
        def check_hidden_states_output(__lowercase : Union[str, Any], __lowercase : str, __lowercase : str ):
            lowercase__ = model_class(_SCREAMING_SNAKE_CASE )
            model.to(_SCREAMING_SNAKE_CASE )
            model.eval()
            with torch.no_grad():
                lowercase__ = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ) )
            lowercase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            lowercase__ = self.model_tester.num_stages
            self.assertEqual(len(_SCREAMING_SNAKE_CASE ), expected_num_stages + 1 )
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 2, self.model_tester.image_size // 2], )

        lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase__ = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                lowercase__ = layer_type
                lowercase__ = True
                check_hidden_states_output(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                lowercase__ = True
                check_hidden_states_output(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE )

    def A__ ( self : List[Any] ):
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )

    @slow
    def A__ ( self : Optional[int] ):
        # Pretrained checkpoints must load without error.
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase__ = RegNetModel.from_pretrained(_SCREAMING_SNAKE_CASE )
            self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( ):
    """Load the COCO cats fixture image used by the slow integration tests.

    Defect fixed: the original bound the opened image to the throwaway name
    ``lowercase__`` and returned the undefined name ``image`` (a NameError).
    """
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
@require_torch
@require_vision
class _snake_case ( unittest.TestCase):
    """Slow integration test: RegNet classification logits on a fixture image.

    NOTE(review): same mangling caveats as the classes above — throwaway
    ``lowercase__`` bindings and the undefined ``_SCREAMING_SNAKE_CASE``.
    """

    @cached_property
    def A__ ( self : int ):
        # Image processor for the first pretrained checkpoint, if vision
        # dependencies are installed.
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )

    @slow
    def A__ ( self : Dict ):
        lowercase__ = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_SCREAMING_SNAKE_CASE )
        lowercase__ = self.default_image_processor
        lowercase__ = prepare_img()
        lowercase__ = image_processor(images=_SCREAMING_SNAKE_CASE, return_tensors="pt" ).to(_SCREAMING_SNAKE_CASE )
        # forward pass
        with torch.no_grad():
            lowercase__ = model(**_SCREAMING_SNAKE_CASE )
        # verify the logits
        lowercase__ = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape, _SCREAMING_SNAKE_CASE )
        lowercase__ = torch.tensor([-0.4180, -1.5051, -3.4836] ).to(_SCREAMING_SNAKE_CASE )
        self.assertTrue(torch.allclose(outputs.logits[0, :3], _SCREAMING_SNAKE_CASE, atol=1e-4 ) )
| 721
|
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
lowercase__ = 0
for ch in input_str:
lowercase__ = ord(SCREAMING_SNAKE_CASE_ )
lowercase__ = pow(2 , SCREAMING_SNAKE_CASE_ )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 37
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the NLLB tokenizers: each optional dependency
# contributes its module -> public-names mapping.
# Defects fixed: the original bound the dict and both export lists to the
# throwaway name `lowercase_`, so `_import_structure` (used below) was
# undefined, and the constructed _LazyModule was discarded instead of being
# installed in sys.modules.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["""NllbTokenizer"""]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["""NllbTokenizerFast"""]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 700
|
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
lowercase__ = len(SCREAMING_SNAKE_CASE_ )
for i in range(SCREAMING_SNAKE_CASE_ ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE_ ):
if numbers[j] < numbers[i]:
lowercase__ , lowercase__ = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
    # Read a comma-separated list, sort it, and print the result.
    # Defects fixed: values were bound to the throwaway `lowercase_` while the
    # unbound names `user_input`/`unsorted` were read, and the sorter was
    # called via the undefined name `exchange_sort` (the def above is the
    # most recent binding of `__lowerCAmelCase` at this point in the module).
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(__lowerCAmelCase(unsorted))
| 37
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger for this configuration file.
lowercase_ = logging.get_logger(__name__)
# NOTE(review): this second assignment rebinds the same mangled name
# `lowercase_`, clobbering the logger above — originally these were distinct
# names (`logger` and a pretrained-config archive map).
lowercase_ = {
    """EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class _snake_case ( PretrainedConfig ):
    """GPT-NeoX model configuration (mirrors `transformers.GPTNeoXConfig`).

    Defects fixed versus the mangled original: `__init__` declared every
    parameter with the same name `__lowercase` (a SyntaxError) — parameter
    names are reconstructed from the attribute assignments; the base class was
    the undefined `lowercase__` (now the `PretrainedConfig` imported at the top
    of the file); the rope-scaling validation referenced the out-of-scope
    `__lowercase` where `dict`, `None` and `float` belong; the class attribute
    carried a broken `Union` annotation (typing is not imported here); and the
    dict-shape error message named the wrong field (`name` instead of `type`).
    """

    # Model-type identifier (mangled attribute name preserved).
    UpperCamelCase__ = """gpt_neox"""

    def __init__(
        self,
        vocab_size=5_0432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=2_4576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=1_0000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        # Validate rope_scaling eagerly (method name `A__` preserved from source).
        self.A__()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisble by the number of attention heads! Make sure to update them!" )

    def A__ ( self : str ):
        """Validate `self.rope_scaling`: None, or {"type": "linear"|"dynamic", "factor": float > 1}."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                F'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get("type", None )
        rope_scaling_factor = self.rope_scaling.get("factor", None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float ) or rope_scaling_factor <= 1.0:
            raise ValueError(F'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
| 701
|
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
if upper_limit < 0:
raise ValueError("Limit for the Catalan sequence must be ≥ 0" )
lowercase__ = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
lowercase__ = 1
if upper_limit > 0:
lowercase__ = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(SCREAMING_SNAKE_CASE_ ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
    # Interactive driver: repeatedly read an upper limit and print the Catalan
    # numbers up to it; a negative value quits.
    # Defects fixed: input was bound to the throwaway `lowercase_` while the
    # unbound name `N` was read, and the generator was called via the undefined
    # name `catalan_numbers` (the def above is the most recent binding of
    # `__lowerCAmelCase` at this point in the module).
    print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
    print("""\n*** Enter -1 at any time to quit ***""")
    print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("""\n********* Goodbye!! ************""")
                break
            else:
                print(F'The Catalan numbers from 0 through {N} are:')
                print(__lowerCAmelCase(N))
                print("""Try another upper limit for the sequence: """, end="""""")
    except (NameError, ValueError):
        print("""\n********* Invalid input, goodbye! ************\n""")
    import doctest

    doctest.testmod()
| 37
| 0
|
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ):
try:
lowercase__ = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
lowercase__ = default
else:
# KEY is set, convert it to True or False.
try:
lowercase__ = strtobool(SCREAMING_SNAKE_CASE_ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'''If set, {key} must be yes or no.''' )
return _value
# Flags controlling which test categories run, parsed once at import time.
# Defects fixed: all four flags were bound to the same throwaway name
# `lowercase_`, leaving `_run_slow_tests` etc. (read by the skip decorators
# below) undefined; the helper is called via `__lowerCAmelCase`, its most
# recent binding at this point in the module (originally `parse_flag_from_env`).
_run_slow_tests = __lowerCAmelCase("""RUN_SLOW""", default=False)
_run_remote_tests = __lowerCAmelCase("""RUN_REMOTE""", default=False)
_run_local_tests = __lowerCAmelCase("""RUN_LOCAL""", default=True)
_run_packaged_tests = __lowerCAmelCase("""RUN_PACKAGED""", default=True)
# pytest skip-if marks for optional dependencies / platforms.
# NOTE(review): every mark is bound to the same mangled name `lowercase_`, so
# only the last assignment survives — originally these were distinct names
# (require_lz4, require_py7zr, require_zstandard, ...).
# Compression
lowercase_ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="""test requires lz4""")
lowercase_ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="""test requires py7zr""")
lowercase_ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="""test requires zstandard""")
# Audio
lowercase_ = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("""soundfile""") is None or version.parse(importlib_metadata.version("""soundfile""")) < version.parse("""0.12.0"""),
    reason="""test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; """,
)
# Beam
lowercase_ = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("""0.3.2"""),
    reason="""test requires apache-beam and a compatible dill version""",
)
# Dill-cloudpickle compatibility
lowercase_ = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("""0.3.2"""),
    reason="""test requires dill>0.3.2 for cloudpickle compatibility""",
)
# Windows
lowercase_ = pytest.mark.skipif(
    sys.platform == """win32""",
    reason="""test should not be run on Windows""",
)
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
try:
import faiss # noqa
except ImportError:
lowercase__ = unittest.skip("test requires faiss" )(SCREAMING_SNAKE_CASE_ )
return test_case
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
try:
import regex # noqa
except ImportError:
lowercase__ = unittest.skip("test requires regex" )(SCREAMING_SNAKE_CASE_ )
return test_case
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
try:
import elasticsearch # noqa
except ImportError:
lowercase__ = unittest.skip("test requires elasticsearch" )(SCREAMING_SNAKE_CASE_ )
return test_case
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
try:
import sqlalchemy # noqa
except ImportError:
lowercase__ = unittest.skip("test requires sqlalchemy" )(SCREAMING_SNAKE_CASE_ )
return test_case
# NOTE(review): these decorators share the mangled name `__lowerCAmelCase`
# (originally require_torch / require_tf / require_jax / require_pil); each
# definition shadows the previous one. Defect fixed in each: the wrapped
# function was bound to the throwaway `lowercase__` and the unbound name
# `test_case` was returned.
def __lowerCAmelCase ( test_case ):
    """Skip `test_case` unless PyTorch is available (per `datasets.config`)."""
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch" )(test_case)
    return test_case


def __lowerCAmelCase ( test_case ):
    """Skip `test_case` unless TensorFlow is available."""
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow" )(test_case)
    return test_case


def __lowerCAmelCase ( test_case ):
    """Skip `test_case` unless JAX is available."""
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX" )(test_case)
    return test_case


def __lowerCAmelCase ( test_case ):
    """Skip `test_case` unless Pillow is available."""
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow" )(test_case)
    return test_case
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("test requires transformers" )(SCREAMING_SNAKE_CASE_ )
else:
return test_case
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("test requires tiktoken" )(SCREAMING_SNAKE_CASE_ )
else:
return test_case
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("test requires spacy" )(SCREAMING_SNAKE_CASE_ )
else:
return test_case
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
def _require_spacy_model(SCREAMING_SNAKE_CASE_ ):
try:
import spacy # noqa F401
spacy.load(SCREAMING_SNAKE_CASE_ )
except ImportError:
return unittest.skip("test requires spacy" )(SCREAMING_SNAKE_CASE_ )
except OSError:
return unittest.skip("test requires spacy model '{}'".format(SCREAMING_SNAKE_CASE_ ) )(SCREAMING_SNAKE_CASE_ )
else:
return test_case
return _require_spacy_model
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("test requires pyspark" )(SCREAMING_SNAKE_CASE_ )
else:
return test_case
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("test requires joblibspark" )(SCREAMING_SNAKE_CASE_ )
else:
return test_case
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
if not _run_slow_tests or _run_slow_tests == 0:
lowercase__ = unittest.skip("test is slow" )(SCREAMING_SNAKE_CASE_ )
return test_case
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    """Test decorator: skip *SCREAMING_SNAKE_CASE_* unless local tests are
    enabled via the module-level ``_run_local_tests`` flag.

    Bug fixed: the original discarded the skip wrapper and returned the
    undefined name ``test_case``.
    """
    test_case = SCREAMING_SNAKE_CASE_
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local" )(test_case )
    return test_case
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    """Test decorator: skip *SCREAMING_SNAKE_CASE_* unless packaged tests are
    enabled via the module-level ``_run_packaged_tests`` flag.

    Bug fixed: the original discarded the skip wrapper and returned the
    undefined name ``test_case``.
    """
    test_case = SCREAMING_SNAKE_CASE_
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged" )(test_case )
    return test_case
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    """Test decorator: skip *SCREAMING_SNAKE_CASE_* unless remote tests are
    enabled via the module-level ``_run_remote_tests`` flag.

    Bug fixed: the original discarded the skip wrapper and returned the
    undefined name ``test_case``.
    """
    test_case = SCREAMING_SNAKE_CASE_
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote" )(test_case )
    return test_case
def __lowerCAmelCase ( *SCREAMING_SNAKE_CASE_ ):
    """Class decorator factory: apply every decorator in
    *SCREAMING_SNAKE_CASE_* to each ``test*`` method of the decorated class.

    Bugs fixed: the loop referenced the undefined name ``decorators``
    (NameError), called ``callable``/each decorator on the argument tuple
    instead of the method, and re-``setattr``-ed the *original* method instead
    of the decorated one.
    """
    decorators = SCREAMING_SNAKE_CASE_

    def decorate(cls ):
        # Replacing existing keys while iterating ``cls.__dict__`` is safe:
        # the dict's size never changes.
        for name, fn in cls.__dict__.items():
            if callable(fn ) and name.startswith("test" ):
                for decorator in decorators:
                    fn = decorator(fn )
                setattr(cls , name , fn )
        return cls

    return decorate
class _snake_case ( lowercase__):
    # Empty marker type.  The base class comes from the module-level name
    # ``lowercase__`` (not visible in this chunk) — presumably an exception
    # base such as ``Exception`` (upstream this is
    # ``RequestWouldHangIndefinitelyError``); TODO confirm.
    pass
class _snake_case ( lowercase__):
    # Enum-style constants; the base class comes from the module-level name
    # ``lowercase__`` (not visible here) — presumably ``enum.Enum``; confirm.
    # Upstream this is ``OfflineSimulationMode`` with members
    # CONNECTION_FAILS / CONNECTION_TIMES_OUT / HF_DATASETS_OFFLINE_SET_TO_1.
    # NOTE(review): all three assignments share the single name
    # ``UpperCamelCase__``, so only the last value (2) survives — the distinct
    # member names were lost and should be restored.
    UpperCamelCase__ : Union[str, Any] =0
    UpperCamelCase__ : str =1
    UpperCamelCase__ : Any =2
@contextmanager
def __lowerCAmelCase ( mode=OfflineSimulationMode.CONNECTION_FAILS , timeout=1e-16 ):
    """Context manager that simulates an offline environment.

    Depending on *mode*, requests either fail immediately, time out, or
    ``datasets`` is flipped into its offline flag.

    Bugs fixed: ``timeout_request`` had four parameters all spelled the same
    (SyntaxError), the timeout was assigned to a throwaway local instead of
    ``kwargs["timeout"]``, the invalid URL was never passed to the real
    request, and the patches installed the wrong callables/values.
    """
    online_request = requests.Session().request

    def timeout_request(session , method , url , **kwargs ):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout" ) is None:
            raise RequestWouldHangIndefinitelyError(
                f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' )
        kwargs["timeout"] = timeout
        try:
            return online_request(method , invalid_url , **kwargs )
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1" , f'''OfflineMock[{url}]''' ),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session , prepared_request , **kwargs ):
        raise requests.ConnectionError("Offline mode is enabled." , request=prepared_request )

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send" , raise_connection_error ):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request" , timeout_request ):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE" , True ):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum." )
@contextmanager
def __lowerCAmelCase ( *args , **kwargs ):
    """Context manager: ``chdir`` into a fresh temporary directory for the
    duration of the ``with`` block, then restore the original cwd.

    Bugs fixed: the ``*``/``**`` parameters shared one name (SyntaxError) and
    the ``finally`` clause re-chdir'ed into the (deleted) temp dir instead of
    restoring the original working directory.
    """
    original_working_dir = str(Path().resolve() )
    with tempfile.TemporaryDirectory(*args , **kwargs ) as tmp_dir:
        try:
            os.chdir(tmp_dir )
            yield
        finally:
            # Restore the caller's working directory even on error.
            os.chdir(original_working_dir )
@contextmanager
def __lowerCAmelCase ( ):
    """Context manager asserting that pyarrow allocations *grow* inside the
    ``with`` block.

    Bug fixed: the baseline was bound to a throwaway local, leaving
    ``previous_allocated_memory`` undefined in the assert (NameError).
    """
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def __lowerCAmelCase ( ):
    """Context manager asserting that pyarrow allocations do *not* grow inside
    the ``with`` block.

    Bug fixed: the baseline was bound to a throwaway local, leaving
    ``previous_allocated_memory`` undefined in the assert (NameError).
    """
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def __lowerCAmelCase ( rng_a , rng_b ):
    """Return True when two numpy bit generators would produce the same next
    10 integers in [0, 100) — i.e. they are in the same state.

    ``deepcopy`` is used so the comparison does not advance either generator.
    Bug fixed: both parameters shared one name (SyntaxError).
    """
    return deepcopy(rng_a ).integers(0 , 100 , 10 ).tolist() == deepcopy(rng_b ).integers(0 , 100 , 10 ).tolist()
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    """Decorator: mark the wrapped test as xfail when the server answers with
    an HTTP 500 or 502; re-raise every other ``HTTPError``.

    Bug fixed: ``_wrapper`` declared three parameters with one name
    (SyntaxError), so the wrapped function and its arguments were conflated.
    """
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func , *args , **kwargs ):
        try:
            return func(*args , **kwargs )
        except HTTPError as err:
            if str(err ).startswith("500" ) or str(err ).startswith("502" ):
                pytest.xfail(str(err ) )
            raise err

    return decorator.decorator(_wrapper , SCREAMING_SNAKE_CASE_ )
class _snake_case :
    """Result of a finished subprocess: return code plus captured stdout and
    stderr lines.

    Bugs fixed: ``__init__`` declared three parameters with the same name
    (SyntaxError) and bound them to locals instead of ``self`` attributes —
    yet callers read ``result.returncode`` / ``result.stdout`` /
    ``result.stderr``.
    """

    def __init__( self , returncode , stdout , stderr ):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def __lowerCAmelCase ( stream , callback ):
    """Forward each line read from the async *stream* to *callback* until EOF.

    Bug fixed: both parameters shared one name (SyntaxError), so the callback
    and stream were indistinguishable.
    """
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def __lowerCAmelCase ( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ):
    """Run *cmd* as a subprocess, teeing its stdout/stderr line-by-line into
    Python lists (and, unless *quiet*, to this process's stdout/stderr).

    Bugs fixed: the outer def and ``tee`` both repeated one parameter name
    (SyntaxError); ``tee`` printed the wrong objects; ``asyncio.wait`` was
    given bare coroutines, which Python >= 3.11 rejects — they are now wrapped
    in tasks.
    """
    if echo:
        print("\nRunning: " , " ".join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line , sink , pipe , label="" ):
        # Decode, record, and (optionally) echo one line of subprocess output.
        line = line.decode("utf-8" ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )

    # XXX: the timeout doesn't seem to make any difference here
    # NOTE(review): ``_read_stream`` / ``_RunOutput`` are presumably the
    # coroutine and class defined just above (renamed by the scramble) —
    # confirm those module-level names exist.
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda l : tee(l , out , sys.stdout , label="stdout:" ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda l : tee(l , err , sys.stderr , label="stderr:" ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def __lowerCAmelCase ( cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ):
    """Synchronously run *cmd* via ``_stream_subprocess`` and return its
    result object; raise ``RuntimeError`` on a non-zero exit code or when the
    command produced no output at all.

    Bugs fixed: six parameters shared one name (SyntaxError); ``cmd_str`` and
    ``stderr`` were bound to throwaway locals and then read (NameError).
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = " ".join(cmd )
    if result.returncode > 0:
        stderr = "\n".join(result.stderr )
        raise RuntimeError(
            f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            f'''The combined stderr from workers follows:\n{stderr}''' )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f'''\'{cmd_str}\' produced no output.''' )
    return result
def __lowerCAmelCase ( ):
    """Return this pytest-xdist worker's numeric id (0 outside xdist).

    Reads ``PYTEST_XDIST_WORKER`` (e.g. ``"gw3"``) and strips the ``gw``
    prefix.  Bug fixed: the env value was bound to a throwaway local while the
    body used an undefined name (NameError).
    """
    worker = os.environ.get("PYTEST_XDIST_WORKER" , "gw0" )
    worker = re.sub(r"^gw" , "" , worker , 0 , re.M )
    return int(worker )
def __lowerCAmelCase ( ):
    """Return a torch.distributed port unique to this pytest-xdist worker
    (base 29500 + worker id).

    Bug fixed: both values were bound to a throwaway local, leaving ``port``
    and ``uniq_delta`` undefined in the return (NameError).
    NOTE(review): ``pytest_xdist_worker_id`` is presumably the function
    defined just above (renamed by the scramble) — confirm it resolves.
    """
    port = 2_9500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| 702
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import structure: maps each submodule to the public names it exposes.
# Bug fixed: the original rebound ``lowercase_`` for every optional-dependency
# branch and finally passed an undefined ``_import_structure`` to
# ``_LazyModule`` (NameError at import time).  ``lowercase_`` is still bound
# here for backward compatibility.
lowercase_ = _import_structure = {
    "configuration_roberta_prelayernorm": [
        "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "RobertaPreLayerNormConfig",
        "RobertaPreLayerNormOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: simply leave the modeling names out of the structure.
    pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
        "ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaPreLayerNormForCausalLM",
        "RobertaPreLayerNormForMaskedLM",
        "RobertaPreLayerNormForMultipleChoice",
        "RobertaPreLayerNormForQuestionAnswering",
        "RobertaPreLayerNormForSequenceClassification",
        "RobertaPreLayerNormForTokenClassification",
        "RobertaPreLayerNormModel",
        "RobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
        "TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaPreLayerNormForCausalLM",
        "TFRobertaPreLayerNormForMaskedLM",
        "TFRobertaPreLayerNormForMultipleChoice",
        "TFRobertaPreLayerNormForQuestionAnswering",
        "TFRobertaPreLayerNormForSequenceClassification",
        "TFRobertaPreLayerNormForTokenClassification",
        "TFRobertaPreLayerNormMainLayer",
        "TFRobertaPreLayerNormModel",
        "TFRobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
        "FlaxRobertaPreLayerNormForCausalLM",
        "FlaxRobertaPreLayerNormForMaskedLM",
        "FlaxRobertaPreLayerNormForMultipleChoice",
        "FlaxRobertaPreLayerNormForQuestionAnswering",
        "FlaxRobertaPreLayerNormForSequenceClassification",
        "FlaxRobertaPreLayerNormForTokenClassification",
        "FlaxRobertaPreLayerNormModel",
        "FlaxRobertaPreLayerNormPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module
    # below resolves them on first access.
    from .configuration_roberta_prelayernorm import (
        ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
        RobertaPreLayerNormConfig,
        RobertaPreLayerNormOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta_prelayernorm import (
            ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaPreLayerNormForCausalLM,
            RobertaPreLayerNormForMaskedLM,
            RobertaPreLayerNormForMultipleChoice,
            RobertaPreLayerNormForQuestionAnswering,
            RobertaPreLayerNormForSequenceClassification,
            RobertaPreLayerNormForTokenClassification,
            RobertaPreLayerNormModel,
            RobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta_prelayernorm import (
            TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaPreLayerNormForCausalLM,
            TFRobertaPreLayerNormForMaskedLM,
            TFRobertaPreLayerNormForMultipleChoice,
            TFRobertaPreLayerNormForQuestionAnswering,
            TFRobertaPreLayerNormForSequenceClassification,
            TFRobertaPreLayerNormForTokenClassification,
            TFRobertaPreLayerNormMainLayer,
            TFRobertaPreLayerNormModel,
            TFRobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta_prelayernorm import (
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormPreTrainedModel,
        )
else:
    import sys

    lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 37
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: maps each submodule to the public names it exposes.
# Bug fixed: the original rebound ``lowercase_`` and then passed an undefined
# ``_import_structure`` to ``_LazyModule`` (NameError at import time).
lowercase_ = _import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: leave the modeling names out of the structure.
    pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]

if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )
else:
    import sys

    lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 703
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def __lowerCAmelCase ( model , ckpt_dir , model_name ):
    """Export a PyTorch ``BertModel``'s state dict as a TF1 checkpoint under
    *ckpt_dir*, named after *model_name*.

    Bugs fixed: the outer def and ``create_tf_var`` each repeated one
    parameter name (SyntaxError), and the body read undefined names
    (``model``, ``name``, ``tensor``, ``session``, ``tf_var``, ``saver``)
    because every assignment targeted a throwaway local.
    """
    # PyTorch stores these as (out, in); TF expects the transpose.
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    # (pytorch-name-fragment, tf-name-fragment) rewrite rules, applied in order.
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir ):
        os.makedirs(ckpt_dir )
    state_dict = model.state_dict()

    def to_tf_var_name(name ):
        # Map a pytorch state-dict key to the equivalent TF variable name.
        for patt, repl in iter(var_map ):
            name = name.replace(patt , repl )
        return f'''bert/{name}'''

    def create_tf_var(tensor , name , session ):
        # Create and initialize a TF variable shaped/typed like *tensor*.
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype )
        tf_var = tf.get_variable(dtype=tf_dtype , shape=tensor.shape , name=name , initializer=tf.zeros_initializer() )
        session.run(tf.variables_initializer([tf_var] ) )
        session.run(tf_var )
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name )
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose ):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor , name=tf_name , session=session )
            tf.keras.backend.set_value(tf_var , torch_tensor )
            tf_weight = session.run(tf_var )
            print(f'''Successfully created {tf_name}: {np.allclose(tf_weight , torch_tensor )}''' )
        saver = tf.train.Saver(tf.trainable_variables() )
        saver.save(session , os.path.join(ckpt_dir , model_name.replace("-" , "_" ) + ".ckpt" ) )
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_=None ):
    """CLI entry point: load a PyTorch BERT checkpoint and convert it to a TF
    checkpoint.  *SCREAMING_SNAKE_CASE_* is an optional argv list for testing.

    Bugs fixed: the ``add_argument`` calls passed the function's own argument
    as ``type=``/``required=`` (instead of ``str``/``True``), and the parsed
    namespace/model were bound to throwaway locals, leaving ``args`` and
    ``model`` undefined (NameError).
    """
    raw_args = SCREAMING_SNAKE_CASE_
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name" , type=str , required=True , help="model name e.g. bert-base-uncased" )
    parser.add_argument(
        "--cache_dir" , type=str , default=None , required=False , help="Directory containing pytorch model" )
    parser.add_argument("--pytorch_model_path" , type=str , required=True , help="/path/to/<pytorch-model-name>.bin" )
    parser.add_argument("--tf_cache_dir" , type=str , required=True , help="Directory in which to save tensorflow model" )
    args = parser.parse_args(raw_args )
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
    # NOTE(review): ``convert_pytorch_checkpoint_to_tf`` is presumably the
    # converter defined above (renamed by the scramble) — confirm it resolves.
    convert_pytorch_checkpoint_to_tf(model=model , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
    # ``main`` did not exist in this module; call the entry point defined
    # immediately above (the binding in effect at this point in the file).
    __lowerCAmelCase()
| 37
| 0
|
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCAmelCase ( tf_checkpoint_path , config_file , pytorch_dump_path ):
    """Convert a TF T5 checkpoint into a PyTorch model saved at
    *pytorch_dump_path*.

    Bugs fixed: three parameters shared one name (SyntaxError) and the body
    read undefined names ``config``/``model`` because every assignment
    targeted a throwaway local.
    """
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file )
    print(f'''Building PyTorch model from configuration: {config}''' )
    model = TaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    # Bugs fixed: the parser and parsed args were bound only to ``lowercase_``
    # while the code below read the undefined names ``parser``/``args``; the
    # final call targeted ``convert_tf_checkpoint_to_pytorch``, which does not
    # exist in this module — it is retargeted to the converter defined above
    # (the binding in effect at this point in the file).  ``lowercase_`` is
    # still bound for backward compatibility.
    lowercase_ = parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help=(
            """The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    lowercase_ = args = parser.parse_args()
    __lowerCAmelCase(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 704
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: maps each submodule to the public names it exposes.
# Bug fixed: the original rebound ``lowercase_`` and then passed an undefined
# ``_import_structure`` to ``_LazyModule`` (NameError at import time).
lowercase_ = _import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: leave the modeling names out of the structure.
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )
else:
    import sys

    lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 37
| 0
|
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
lowercase_ = """true"""
def __lowerCAmelCase ( accelerator , dataset_length=82 , batch_size=16 ):
    """Build a seeded regression model, a DDP copy prepared by *accelerator*,
    and a matching dataloader.

    Bug fixed: three parameters shared one name (SyntaxError) and every
    intermediate was bound to a throwaway local.
    """
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=dataset_length )
    dataloader = DataLoader(dset , batch_size=batch_size )
    model.to(accelerator.device )
    ddp_model, dataloader = accelerator.prepare(ddp_model , dataloader )
    return model, ddp_model, dataloader
def __lowerCAmelCase ( accelerator , use_longest=False ):
    """Build an MRPC validation dataloader tokenized with the test BERT
    tokenizer; pad to longest when *use_longest*, else to max_length=128.

    Bugs fixed: both parameters shared one name (SyntaxError) and
    ``tokenize_function`` passed its own ``examples`` argument as
    ``truncation=``/``max_length=`` instead of ``True``/``None``.
    """
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" )
    dataset = load_dataset("glue" , "mrpc" , split="validation" )

    def tokenize_function(examples ):
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , )
    tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )

    def collate_fn(examples ):
        if use_longest:
            return tokenizer.pad(examples , padding="longest" , return_tensors="pt" )
        return tokenizer.pad(examples , padding="max_length" , max_length=128 , return_tensors="pt" )

    return DataLoader(tokenized_datasets , shuffle=False , collate_fn=collate_fn , batch_size=16 )
def __lowerCAmelCase ( dispatch_batches , split_batches ):
    """Build the MRPC model/dataloader pair in both plain and
    accelerator-prepared ('ddp') form.

    Bug fixed: both parameters shared one name (SyntaxError); the model's
    ``return_dict`` was bound to the parameter instead of ``True``.
    NOTE(review): ``get_dataloader`` is presumably the helper defined just
    above (renamed by the scramble) — confirm it resolves.
    """
    accelerator = Accelerator(dispatch_batches=dispatch_batches , split_batches=split_batches )
    dataloader = get_dataloader(accelerator , not dispatch_batches )
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased" , return_dict=True )
    ddp_model, ddp_dataloader = accelerator.prepare(model , dataloader )
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def __lowerCAmelCase ( model , dataloader , accelerator ):
    """Run *model* over *dataloader* and return gathered (logits, targets)
    tensors concatenated across batches.

    Bug fixed: three parameters shared one name (SyntaxError) and the loop
    bodies read undefined locals.
    """
    logits_and_targets = []
    for batch in dataloader:
        inputs, target = batch.values()
        with torch.no_grad():
            logit = model(inputs )
        logit, target = accelerator.gather_for_metrics((logit, target) )
        logits_and_targets.append((logit, target) )
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit )
        targs.append(targ )
    logits, targs = torch.cat(logits ), torch.cat(targs )
    return logits, targs
def __lowerCAmelCase ( accelerator , num_samples=82 , dispatch_batches=False , split_batches=False , batch_size=16 ):
    """Assert that ``gather_for_metrics`` yields exactly *num_samples* inputs.

    Bug fixed: five parameters shared one name (SyntaxError).
    NOTE(review): ``get_basic_setup``/``generate_predictions`` are presumably
    the helpers defined above (renamed by the scramble) — confirm they resolve.
    """
    model, ddp_model, dataloader = get_basic_setup(accelerator , num_samples , batch_size )
    logits, targs = generate_predictions(ddp_model , dataloader , accelerator )
    assert (
        len(logits ) == num_samples
    ), f'''Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits )}'''
def __lowerCAmelCase ( dispatch_batches = False , split_batches = False ):
    """Compare baseline (single-process) MRPC metrics against metrics computed
    through ``accelerator.gather_for_metrics``; they must agree.

    Bug fixed: both parameters shared one name (SyntaxError) and every
    intermediate was bound to a throwaway local, so ``baseline``/``distributed``
    were undefined in the final comparison.
    """
    metric = evaluate.load("glue" , "mrpc" )
    setup, accelerator = get_mrpc_setup(dispatch_batches , split_batches )
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device )
    model.eval()
    for batch in dataloader:
        batch.to(device )
        with torch.inference_mode():
            outputs = model(**batch )
        preds = outputs.logits.argmax(dim=-1 )
        metric.add_batch(predictions=preds , references=batch["labels"] )
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch )
        preds = outputs.logits.argmax(dim=-1 )
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references) )
        metric.add_batch(predictions=preds , references=references )
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key] , distributed[key] ), f'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def __lowerCAmelCase ( ):
    """Driver: run the gather_for_metrics and torch-metrics checks over every
    split/dispatch combination.

    Bug fixed: this no-argument function referenced ``SCREAMING_SNAKE_CASE_``
    (NameError); the intended constants/loop variables are now threaded
    through.  NOTE(review): ``test_mrpc``/``test_torch_metrics`` are presumably
    the functions defined above (renamed by the scramble) — confirm they
    resolve.
    """
    accelerator = Accelerator(split_batches=False , dispatch_batches=False )
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**" )
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
                test_mrpc(dispatch_batches , split_batches )
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**" )
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches , dispatch_batches=dispatch_batches )
            if accelerator.is_local_main_process:
                print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
            test_torch_metrics(accelerator , 99 )
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**" )
    accelerator = Accelerator()
    test_torch_metrics(accelerator , 512 )
    accelerator.state._reset_state()
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    """Process entry point for ``xla_spawn`` launchers; the argument (the
    process index) is unused."""
    # For xla_spawn (TPUs)
    # NOTE(review): ``main`` is not defined anywhere in this module (the
    # driver above is also named ``__lowerCAmelCase``), so this call raises
    # NameError at runtime — confirm the intended target.
    main()
if __name__ == "__main__":
    # NOTE(review): same concern — ``main`` is undefined here as well.
    main()
| 705
|
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
# Mapping: shortcut name -> (config class, TF model class(es), PyTorch model
# class(es), pretrained archive map(s)/list(s)).
# Bug fixed: this table was bound only to ``lowercase_`` while the converter
# below reads it as ``MODEL_CLASSES`` (NameError); both names are now bound,
# keeping ``lowercase_`` for backward compatibility.
MODEL_CLASSES = lowercase_ = {
    "bart": (
        BartConfig,
        TFBartForConditionalGeneration,
        TFBartForSequenceClassification,
        BartForConditionalGeneration,
        BART_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "bert": (
        BertConfig,
        TFBertForPreTraining,
        BertForPreTraining,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-large-uncased-whole-word-masking-finetuned-squad": (
        BertConfig,
        TFBertForQuestionAnswering,
        BertForQuestionAnswering,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-large-cased-whole-word-masking-finetuned-squad": (
        BertConfig,
        TFBertForQuestionAnswering,
        BertForQuestionAnswering,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-base-cased-finetuned-mrpc": (
        BertConfig,
        TFBertForSequenceClassification,
        BertForSequenceClassification,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "dpr": (
        DPRConfig,
        TFDPRQuestionEncoder,
        TFDPRContextEncoder,
        TFDPRReader,
        DPRQuestionEncoder,
        DPRContextEncoder,
        DPRReader,
        DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "gpt2": (
        GPTaConfig,
        TFGPTaLMHeadModel,
        GPTaLMHeadModel,
        GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlnet": (
        XLNetConfig,
        TFXLNetLMHeadModel,
        XLNetLMHeadModel,
        XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlm": (
        XLMConfig,
        TFXLMWithLMHeadModel,
        XLMWithLMHeadModel,
        XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlm-roberta": (
        XLMRobertaConfig,
        TFXLMRobertaForMaskedLM,
        XLMRobertaForMaskedLM,
        XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "transfo-xl": (
        TransfoXLConfig,
        TFTransfoXLLMHeadModel,
        TransfoXLLMHeadModel,
        TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "openai-gpt": (
        OpenAIGPTConfig,
        TFOpenAIGPTLMHeadModel,
        OpenAIGPTLMHeadModel,
        OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "roberta": (
        RobertaConfig,
        TFRobertaForCausalLM,
        TFRobertaForMaskedLM,
        RobertaForMaskedLM,
        ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "layoutlm": (
        LayoutLMConfig,
        TFLayoutLMForMaskedLM,
        LayoutLMForMaskedLM,
        LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "roberta-large-mnli": (
        RobertaConfig,
        TFRobertaForSequenceClassification,
        RobertaForSequenceClassification,
        ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "camembert": (
        CamembertConfig,
        TFCamembertForMaskedLM,
        CamembertForMaskedLM,
        CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "flaubert": (
        FlaubertConfig,
        TFFlaubertWithLMHeadModel,
        FlaubertWithLMHeadModel,
        FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "distilbert": (
        DistilBertConfig,
        TFDistilBertForMaskedLM,
        DistilBertForMaskedLM,
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "distilbert-base-distilled-squad": (
        DistilBertConfig,
        TFDistilBertForQuestionAnswering,
        DistilBertForQuestionAnswering,
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "lxmert": (
        LxmertConfig,
        TFLxmertForPreTraining,
        LxmertForPreTraining,
        LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "lxmert-visual-feature-encoder": (
        LxmertConfig,
        TFLxmertVisualFeatureEncoder,
        LxmertVisualFeatureEncoder,
        LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "ctrl": (
        CTRLConfig,
        TFCTRLLMHeadModel,
        CTRLLMHeadModel,
        CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "albert": (
        AlbertConfig,
        TFAlbertForPreTraining,
        AlbertForPreTraining,
        ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "t5": (
        TaConfig,
        TFTaForConditionalGeneration,
        TaForConditionalGeneration,
        T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "electra": (
        ElectraConfig,
        TFElectraForPreTraining,
        ElectraForPreTraining,
        ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "wav2vec2": (
        WavaVecaConfig,
        TFWavaVecaModel,
        WavaVecaModel,
        WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
}
def __lowerCAmelCase(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    """Convert one PyTorch checkpoint to a TF2 ``.h5`` weight file.

    Args:
        model_type: key into the module-level ``MODEL_CLASSES`` registry.
        pytorch_checkpoint_path: local path or AWS shortcut name of the PT checkpoint.
        config_file: local config path or AWS shortcut name.
        tf_dump_path: destination path for the TF weights.
        compare_with_pt_model: if True, run both models on dummy inputs and
            assert their outputs agree within 2e-2.
        use_cached_models: if False, force re-download of cached files.

    Raises:
        ValueError: if ``model_type`` is not registered in ``MODEL_CLASSES``.
    """
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"""Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.""")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        # Shortcut name: resolve it to a cached local config file.
        # "config.json" is the canonical transformers config filename (CONFIG_NAME).
        config_file = cached_file(config_file, "config.json", force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"""Building TensorFlow model from configuration: {config}""")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        # "pytorch_model.bin" is the canonical transformers weight filename (WEIGHTS_NAME).
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, "pytorch_model.bin", force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"""Max absolute difference between models outputs {diff}""")
        assert diff <= 2e-2, f"""Error, model absolute difference is >2e-2: {diff}"""

    # Save pytorch-model
    print(f"""Save TensorFlow model to {tf_dump_path}""")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def __lowerCAmelCase(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    """Batch-convert PyTorch checkpoints (one model type, or all) to TF2.

    Args:
        args_model_type: single ``MODEL_CLASSES`` key, or None to convert all types.
        tf_dump_path: directory receiving the ``<name>-tf_model.h5`` files.
        model_shortcut_names_or_path: checkpoints to convert; None = all known ones.
        config_shortcut_names_or_path: matching configs; None = same as models.
        compare_with_pt_model / use_cached_models: forwarded to the single converter.
        remove_cached_files: delete downloaded files after each conversion.
        only_convert_finetuned_models: restrict to -squad/-mrpc/-mnli checkpoints.

    Raises:
        ValueError: for an unknown model type.
    """
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f""" Converting model type {j}/{len(model_types )}: {model_type}""")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"""Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.""")
        # NOTE(review): this 5-way unpack disagrees with the 4-way unpack in the
        # single-checkpoint converter above, and several visible MODEL_CLASSES
        # entries are 4-tuples — confirm the registry entry shape.
        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"""    Skipping finetuned checkpoint {model_shortcut_name}""")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"""    Skipping not finetuned checkpoint {model_shortcut_name}""")
                continue
            print(
                f"""    Converting checkpoint {i}/{len(model_shortcut_names_or_path )}: {model_shortcut_name} - model_type {model_type}""")
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, "config.json", force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name
            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, "pytorch_model.bin", force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name
            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            # NOTE(review): the single-checkpoint converter defined above is also
            # (mis)named `__lowerCAmelCase` in this file; `convert_pt_checkpoint_to_tf`
            # must resolve to it once that function's intended name is restored.
            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        help=(
            f"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and "
            "convert all the models from AWS."
        ),
    )
    parser.add_argument(
        "--pytorch_checkpoint_path",
        default=None,
        type=str,
        help=(
            "Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
            "If not given, will download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        help=(
            "The config json file corresponding to the pre-trained model. \n"
            "This specifies the model architecture. If not given and "
            "--pytorch_checkpoint_path is not given or is a shortcut name "
            "use the configuration associated to the shortcut name on the AWS"
        ),
    )
    parser.add_argument(
        "--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
    )
    parser.add_argument(
        "--use_cached_models",
        action="store_true",
        help="Use cached models if possible instead of updating to latest checkpoint versions.",
    )
    parser.add_argument(
        "--remove_cached_files",
        action="store_true",
        help="Remove pytorch models after conversion (save memory when converting in batches).",
    )
    parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
    args = parser.parse_args()

    # NOTE(review): the batch converter defined above carries the mangled name
    # `__lowerCAmelCase` in this file (originally `convert_all_pt_checkpoints_to_tf`);
    # call it under the name that actually exists so the script runs.
    __lowerCAmelCase(
        args.model_type.lower() if args.model_type is not None else None,
        args.tf_dump_path,
        model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
        if args.pytorch_checkpoint_path is not None
        else None,
        config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
        compare_with_pt_model=args.compare_with_pt_model,
        use_cached_models=args.use_cached_models,
        remove_cached_files=args.remove_cached_files,
        only_convert_finetuned_models=args.only_convert_finetuned_models,
    )
| 37
| 0
|
def __lowerCAmelCase(grid, row, col, visit):
    """Count unique paths from (row, col) to the bottom-right cell of *grid*.

    Moves in the four cardinal directions; cells holding 1 are walls and no
    cell is visited twice on a path. ``visit`` is the set of cells on the
    current path (pass an empty set at the top-level call).

    Returns:
        Number of distinct simple paths reaching ``grid[-1][-1]``.
    """
    row_length, col_length = len(grid), len(grid[0])
    # Out of bounds, already on the current path, or blocked: dead end.
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += __lowerCAmelCase(grid, row + 1, col, visit)
    count += __lowerCAmelCase(grid, row - 1, col, visit)
    count += __lowerCAmelCase(grid, row, col + 1, visit)
    count += __lowerCAmelCase(grid, row, col - 1, visit)

    # Backtrack so sibling branches may reuse this cell.
    visit.remove((row, col))
    return count
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 706
|
import math
def __lowerCAmelCase(x, y):
    """Return the base-10 logarithm of ``x ** y`` (i.e. ``y * log10(x)``).

    Used to compare the magnitudes of large powers without computing them.
    Special cases: returns 0 when x == 0 (0**y == 0) and 1 when y == 0
    (x**0 == 1) — note these returns are the *values*, not their logs,
    mirroring the original script's convention.

    Raises:
        ValueError: from math.log10 when x < 0 (log of a negative number).
    """
    if 0 not in (x, y):
        # log10(x ** y) == y * log10(x)
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")
if __name__ == "__main__":  # Main function
    # Read two (base, power) pairs and report which power is larger, comparing
    # their base-10 logarithms via the helper above (mangled to `__lowerCAmelCase`).
    prompt = """Enter the base and the power separated by a comma: """
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = __lowerCAmelCase(x1, y1)
    res2 = __lowerCAmelCase(x2, y2)
    # We check for the largest number
    if res1 > res2:
        print("""Largest number is""", x1, """^""", y1)
    elif res2 > res1:
        print("""Largest number is""", x2, """^""", y2)
    else:
        print("""Both are equal""")
| 37
| 0
|
def __lowerCAmelCase():
    """Project Euler 9: product a*b*c of the Pythagorean triple with a+b+c = 1000.

    Iterates a <= b (c = 1000 - a - b) and returns the product of the unique
    triple satisfying a^2 + b^2 = c^2 (200, 375, 425 -> 31875000).
    """
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)  # b starts at a so each triple is counted once
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
    # The solver above carries the mangled name `__lowerCAmelCase`
    # (originally `solution`); call it under the name that exists.
    print(f"{__lowerCAmelCase() = }")
| 707
|
import pickle
import numpy as np
from matplotlib import pyplot as plt
class _snake_case :
    """A small hand-rolled CNN: one convolution layer, average/max pooling,
    and a two-layer fully-connected network trained with manual backprop.

    NOTE(review): throughout this class the scrambled source assigns results to
    a throwaway local ``lowercase__`` while later code reads the real names
    (``self.conva``, ``self.wkj``, ``featuremap`` ...), and ``__init__`` declares
    several parameters under one duplicated name — the bindings must be
    restored before this class can run. Flagged here, not fixed.
    """

    # Expected (unscrambled) parameters: three BP layer sizes, conv spec
    # [kernel, kernel, n_kernels] + stride, pooling window size, and the two
    # learning rates for weights (rate_w) and thresholds (rate_t).
    def __init__( self : Tuple, __lowercase : Union[str, Any], __lowercase : int, __lowercase : Union[str, Any], __lowercase : str, __lowercase : List[Any], __lowercase : List[str]=0.2, __lowercase : List[str]=0.2 ):
        lowercase__ = bp_numa
        lowercase__ = bp_numa
        lowercase__ = bp_numa
        lowercase__ = conva_get[:2]
        lowercase__ = conva_get[2]
        lowercase__ = size_pa
        lowercase__ = rate_w
        lowercase__ = rate_t
        # Random initial weights in (-0.5, 0.5) for each conv kernel and both
        # fully-connected layers; thresholds in (-1, 1).
        lowercase__ = [
            np.mat(-1 * np.random.rand(self.conva[0], self.conva[0] ) + 0.5 )
            for i in range(self.conva[1] )
        ]
        lowercase__ = np.mat(-1 * np.random.rand(self.num_bpa, self.num_bpa ) + 0.5 )
        lowercase__ = np.mat(-1 * np.random.rand(self.num_bpa, self.num_bpa ) + 0.5 )
        lowercase__ = -2 * np.random.rand(self.conva[1] ) + 1
        lowercase__ = -2 * np.random.rand(self.num_bpa ) + 1
        lowercase__ = -2 * np.random.rand(self.num_bpa ) + 1

    # Persist all hyper-parameters and learned tensors with pickle.
    def A__ ( self : Any, __lowercase : List[str] ):
        # save model dict with pickle
        lowercase__ = {
            "num_bp1": self.num_bpa,
            "num_bp2": self.num_bpa,
            "num_bp3": self.num_bpa,
            "conv1": self.conva,
            "step_conv1": self.step_conva,
            "size_pooling1": self.size_poolinga,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conva,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conva,
            "thre_bp2": self.thre_bpa,
            "thre_bp3": self.thre_bpa,
        }
        with open(__lowercase, "wb" ) as f:
            pickle.dump(__lowercase, __lowercase )

        print(F'''Model saved: {save_path}''' )

    # Alternate constructor: rebuild a CNN instance from a pickled model dict.
    @classmethod
    def A__ ( cls : Dict, __lowercase : Union[str, Any] ):
        # read saved model
        with open(__lowercase, "rb" ) as f:
            lowercase__ = pickle.load(__lowercase )  # noqa: S301

        lowercase__ = model_dic.get("conv1" )
        conv_get.append(model_dic.get("step_conv1" ) )
        lowercase__ = model_dic.get("size_pooling1" )
        lowercase__ = model_dic.get("num_bp1" )
        lowercase__ = model_dic.get("num_bp2" )
        lowercase__ = model_dic.get("num_bp3" )
        lowercase__ = model_dic.get("rate_weight" )
        lowercase__ = model_dic.get("rate_thre" )
        # create model instance
        lowercase__ = CNN(__lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase )
        # modify model parameter
        lowercase__ = model_dic.get("w_conv1" )
        lowercase__ = model_dic.get("wkj" )
        lowercase__ = model_dic.get("vji" )
        lowercase__ = model_dic.get("thre_conv1" )
        lowercase__ = model_dic.get("thre_bp2" )
        lowercase__ = model_dic.get("thre_bp3" )
        return conv_ins

    # Logistic sigmoid activation.
    def A__ ( self : str, __lowercase : List[Any] ):
        return 1 / (1 + np.exp(-1 * x ))

    # Round to 3 decimals (used when emitting predictions).
    def A__ ( self : List[str], __lowercase : Optional[Any] ):
        return round(__lowercase, 3 )

    # Convolve `data` with every kernel in `w_convs` (stride `conv_step`),
    # returning the flattened input slices and the per-kernel feature maps.
    def A__ ( self : Optional[Any], __lowercase : Dict, __lowercase : Optional[int], __lowercase : Optional[int], __lowercase : Optional[Any], __lowercase : str ):
        # convolution process
        lowercase__ = convs[0]
        lowercase__ = convs[1]
        lowercase__ = np.shape(__lowercase )[0]
        # get the data slice of original image data, data_focus
        lowercase__ = []
        for i_focus in range(0, size_data - size_conv + 1, __lowercase ):
            for j_focus in range(0, size_data - size_conv + 1, __lowercase ):
                lowercase__ = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(__lowercase )
        # calculate the feature map of every single kernel, and saved as list of matrix
        lowercase__ = []
        lowercase__ = int((size_data - size_conv) / conv_step + 1 )
        for i_map in range(__lowercase ):
            lowercase__ = []
            for i_focus in range(len(__lowercase ) ):
                lowercase__ = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map] ) )
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(__lowercase ) )
            lowercase__ = np.asmatrix(__lowercase ).reshape(
                __lowercase, __lowercase )
            data_featuremap.append(__lowercase )

        # expanding the data slice to one dimension
        lowercase__ = []
        for each_focus in data_focus:
            focusa_list.extend(self.Expand_Mat(__lowercase ) )
        lowercase__ = np.asarray(__lowercase )
        return focus_list, data_featuremap

    # Pool each feature map with an average (default) or max window.
    def A__ ( self : List[Any], __lowercase : Any, __lowercase : List[Any], __lowercase : Union[str, Any]="average_pool" ):
        # pooling process
        lowercase__ = len(featuremaps[0] )
        lowercase__ = int(size_map / size_pooling )
        lowercase__ = []
        for i_map in range(len(__lowercase ) ):
            lowercase__ = featuremaps[i_map]
            lowercase__ = []
            for i_focus in range(0, __lowercase, __lowercase ):
                for j_focus in range(0, __lowercase, __lowercase ):
                    lowercase__ = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(__lowercase ) )
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(__lowercase ) )
            lowercase__ = np.asmatrix(__lowercase ).reshape(__lowercase, __lowercase )
            featuremap_pooled.append(__lowercase )
        return featuremap_pooled

    # Flatten a list of 2-D matrices into a single 1-D array.
    def A__ ( self : str, __lowercase : Optional[Any] ):
        # expanding three dimension data to one dimension list
        lowercase__ = []
        for i in range(len(__lowercase ) ):
            lowercase__ = np.shape(data[i] )
            lowercase__ = data[i].reshape(1, shapes[0] * shapes[1] )
            lowercase__ = data_listed.getA().tolist()[0]
            data_expanded.extend(__lowercase )
        lowercase__ = np.asarray(__lowercase )
        return data_expanded

    # Flatten a single matrix to a 1-row matrix.
    def A__ ( self : Optional[int], __lowercase : Optional[int] ):
        # expanding matrix to one dimension list
        lowercase__ = np.asarray(__lowercase )
        lowercase__ = np.shape(__lowercase )
        lowercase__ = data_mat.reshape(1, shapes[0] * shapes[1] )
        return data_expanded

    # Back-propagate pooled-layer gradients to the convolution layer: upsample
    # each pooled gradient over its pooling window, then multiply by the
    # sigmoid derivative of the conv output.
    def A__ ( self : str, __lowercase : Tuple, __lowercase : List[Any], __lowercase : Any, __lowercase : Union[str, Any], __lowercase : Tuple ):
        lowercase__ = []
        lowercase__ = 0
        for i_map in range(__lowercase ):
            lowercase__ = np.ones((size_map, size_map) )
            for i in range(0, __lowercase, __lowercase ):
                for j in range(0, __lowercase, __lowercase ):
                    lowercase__ = pd_pool[
                        i_pool
                    ]
                    lowercase__ = i_pool + 1
            lowercase__ = np.multiply(
                __lowercase, np.multiply(out_map[i_map], (1 - out_map[i_map]) ) )
            pd_all.append(__lowercase )
        return pd_all

    # Gradient-descent training loop; stops after n_repeat epochs or when the
    # summed per-pattern error drops below error_accuracy.
    # NOTE(review): the last parameter (draw_e) defaults to the *type* `bool`,
    # which is truthy — presumably meant to be True/False; confirm.
    def A__ ( self : Tuple, __lowercase : int, __lowercase : Optional[Any], __lowercase : List[Any], __lowercase : Optional[Any], __lowercase : List[Any], __lowercase : List[str]=bool ):
        # model traning
        print("----------------------Start Training-------------------------" )
        print((" - - Shape: Train_Data  ", np.shape(__lowercase )) )
        print((" - - Shape: Teach_Data  ", np.shape(__lowercase )) )
        lowercase__ = 0
        lowercase__ = []
        lowercase__ = 1_0000
        while rp < n_repeat and mse >= error_accuracy:
            lowercase__ = 0
            print(F'''-------------Learning Time {rp}--------------''' )
            for p in range(len(__lowercase ) ):
                # print('------------Learning Image: %d--------------'%p)
                lowercase__ = np.asmatrix(datas_train[p] )
                lowercase__ = np.asarray(datas_teach[p] )
                lowercase__ , lowercase__ = self.convolute(
                    __lowercase, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, )
                lowercase__ = self.pooling(__lowercase, self.size_poolinga )
                lowercase__ = np.shape(__lowercase )
                lowercase__ = self._expand(__lowercase )
                lowercase__ = data_bp_input
                lowercase__ = np.dot(__lowercase, self.vji.T ) - self.thre_bpa
                lowercase__ = self.sig(__lowercase )
                lowercase__ = np.dot(__lowercase, self.wkj.T ) - self.thre_bpa
                lowercase__ = self.sig(__lowercase )
                # --------------Model Leaning ------------------------
                # calculate error and gradient---------------
                lowercase__ = np.multiply(
                    (data_teach - bp_outa), np.multiply(__lowercase, (1 - bp_outa) ) )
                lowercase__ = np.multiply(
                    np.dot(__lowercase, self.wkj ), np.multiply(__lowercase, (1 - bp_outa) ) )
                lowercase__ = np.dot(__lowercase, self.vji )
                lowercase__ = pd_i_all / (self.size_poolinga * self.size_poolinga)
                lowercase__ = pd_conva_pooled.T.getA().tolist()
                lowercase__ = self._calculate_gradient_from_pool(
                    __lowercase, __lowercase, shape_featuremapa[0], shape_featuremapa[1], self.size_poolinga, )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conva[1] ):
                    lowercase__ = self._expand_mat(pd_conva_all[k_conv] )
                    lowercase__ = self.rate_weight * np.dot(__lowercase, __lowercase )
                    lowercase__ = self.w_conva[k_conv] + delta_w.reshape(
                        (self.conva[0], self.conva[0]) )
                    lowercase__ = (
                        self.thre_conva[k_conv]
                        - np.sum(pd_conva_all[k_conv] ) * self.rate_thre
                    )
                # all connected layer
                lowercase__ = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
                lowercase__ = self.vji + pd_j_all.T * bp_outa * self.rate_weight
                lowercase__ = self.thre_bpa - pd_k_all * self.rate_thre
                lowercase__ = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                lowercase__ = np.sum(abs(data_teach - bp_outa ) )
                error_count += errors
                # print('   ----Teach      ',data_teach)
                # print('   ----BP_output  ',bp_out3)
            lowercase__ = rp + 1
            lowercase__ = error_count / patterns
            all_mse.append(__lowercase )

        def draw_error():
            # Plot the per-epoch MSE against the target accuracy line.
            lowercase__ = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
            plt.plot(__lowercase, "+-" )
            plt.plot(__lowercase, "r--" )
            plt.xlabel("Learning Times" )
            plt.ylabel("All_mse" )
            plt.grid(__lowercase, alpha=0.5 )
            plt.show()

        print("------------------Training Complished---------------------" )
        print((" - - Training epoch: ", rp, F'''     - - Mse: {mse:.6f}''') )
        if draw_e:
            draw_error()
        return mse

    # Forward pass over a test set; returns predictions rounded to 3 decimals.
    def A__ ( self : List[str], __lowercase : Optional[int] ):
        # model predict
        lowercase__ = []
        print("-------------------Start Testing-------------------------" )
        print((" - - Shape: Test_Data  ", np.shape(__lowercase )) )
        for p in range(len(__lowercase ) ):
            lowercase__ = np.asmatrix(datas_test[p] )
            lowercase__ , lowercase__ = self.convolute(
                __lowercase, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, )
            lowercase__ = self.pooling(__lowercase, self.size_poolinga )
            lowercase__ = self._expand(__lowercase )
            lowercase__ = data_bp_input
            lowercase__ = bp_outa * self.vji.T - self.thre_bpa
            lowercase__ = self.sig(__lowercase )
            lowercase__ = bp_outa * self.wkj.T - self.thre_bpa
            lowercase__ = self.sig(__lowercase )
            produce_out.extend(bp_outa.getA().tolist() )
        lowercase__ = [list(map(self.do_round, __lowercase ) ) for each in produce_out]
        return np.asarray(__lowercase )

    # Expose the intermediate conv and pooled outputs for inspection.
    def A__ ( self : int, __lowercase : Any ):
        # return the data of image after convoluting process so we can check it out
        lowercase__ = np.asmatrix(__lowercase )
        lowercase__ , lowercase__ = self.convolute(
            __lowercase, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, )
        lowercase__ = self.pooling(__lowercase, self.size_poolinga )
        return data_conveda, data_pooleda
if __name__ == "__main__":
    # No demo/training driver is provided for this module.
    pass
| 37
| 0
|
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger — the config classes below call `logger.warning` / `logger.info`,
# so this must be bound to the name `logger` (the scrambled source dropped it
# into a throwaway variable, leaving `logger` undefined at runtime).
logger = logging.get_logger(__name__)

# Map of shortcut names to remote config URLs for the ALIGN checkpoints.
ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """kakaobrain/align-base""": """https://huggingface.co/kakaobrain/align-base/resolve/main/config.json""",
}
class _snake_case ( lowercase__):
    """Configuration for the BERT-style text tower of an ALIGN model.

    Stores the encoder hyper-parameters as instance attributes (the scrambled
    source discarded them into throwaway locals, producing an empty config).
    """

    # `model_type` is read by the PretrainedConfig machinery and by
    # `from_pretrained` below; the scrambled attribute name broke that lookup.
    model_type: str = """align_text_model"""

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-1_2,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this text config, unwrapping it from a composite "align" config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")

        return cls.from_dict(config_dict, **kwargs)
class _snake_case ( lowercase__):
    """Configuration for the EfficientNet-style vision tower of an ALIGN model.

    Stores the backbone hyper-parameters as instance attributes (the scrambled
    source discarded them into throwaway locals, producing an empty config).
    """

    # Read by the PretrainedConfig machinery and `from_pretrained` below.
    model_type: str = """align_vision_model"""

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        # Each block repeat expands into 4 hidden layers in the backbone.
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this vision config, unwrapping it from a composite "align" config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")

        return cls.from_dict(config_dict, **kwargs)
class _snake_case ( lowercase__):
    """Composite ALIGN configuration bundling a text and a vision sub-config."""

    model_type = """align"""
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=640, temperature_init_value=1.0, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        # NOTE(review): AlignTextConfig / AlignVisionConfig must resolve to the two
        # config classes defined above (both were renamed `_snake_case` by the
        # scrambler) — restore their class names alongside this fix.
        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Build an ALIGN config from the two already-instantiated sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 708
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
# Emit info-level progress messages from transformers during conversion.
logging.set_verbosity_info()
# NOTE(review): scrambled binding — this logger is meant to be named `logger`
# (it is unused in the visible code, so the mangled name is currently harmless).
lowercase_ = logging.get_logger(__name__)
def __lowerCAmelCase(model_name):
    """Build a ``BitConfig`` for *model_name* with ImageNet-1k label mappings.

    Downloads the id->label table from the `huggingface/label-files` dataset
    and enables weight-standardized convolutions for "bit" checkpoints.
    """
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def __lowerCAmelCase(name):
    """Map a timm BiT state-dict key to its HuggingFace BitForImageClassification key."""
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    # Everything not already routed to the embedder/classifier/norm lives
    # under the encoder in the HF model.
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
def __lowerCAmelCase():
    """Download the standard COCO cats image used to sanity-check the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True lets PIL read directly from the response body via .raw.
    return Image.open(requests.get(url, stream=True).raw)
@torch.no_grad()
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ):
    """Convert a timm BiT checkpoint to HuggingFace format and verify parity.

    Intended parameters (mangled by the scrambler): ``model_name``,
    ``pytorch_dump_folder_path``, ``push_to_hub=False``.

    NOTE(review): throughout this body the scrambler dropped results into a
    throwaway local ``lowercase__`` while later lines read the real names
    (``state_dict``, ``model``, ``transform`` ...); the bindings — and the
    call to the key-renaming helper inside the state-dict loop — must be
    restored before this function can run.
    """
    lowercase__ = get_config(SCREAMING_SNAKE_CASE_ )
    # load original model from timm
    lowercase__ = create_model(SCREAMING_SNAKE_CASE_ , pretrained=SCREAMING_SNAKE_CASE_ )
    timm_model.eval()
    # load state_dict of original model
    lowercase__ = timm_model.state_dict()
    for key in state_dict.copy().keys():
        lowercase__ = state_dict.pop(SCREAMING_SNAKE_CASE_ )
        # "head" weights are squeezed to drop the 1x1 spatial dims of the fc-as-conv.
        lowercase__ = val.squeeze() if "head" in key else val
    # load HuggingFace model
    lowercase__ = BitForImageClassification(SCREAMING_SNAKE_CASE_ )
    model.eval()
    model.load_state_dict(SCREAMING_SNAKE_CASE_ )
    # create image processor
    lowercase__ = create_transform(**resolve_data_config({} , model=SCREAMING_SNAKE_CASE_ ) )
    lowercase__ = transform.transforms
    # Map timm/PIL interpolation names to the transformers resampling enum.
    lowercase__ = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    lowercase__ = BitImageProcessor(
        do_resize=SCREAMING_SNAKE_CASE_ , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=SCREAMING_SNAKE_CASE_ , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=SCREAMING_SNAKE_CASE_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    lowercase__ = prepare_img()
    lowercase__ = transform(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 )
    lowercase__ = processor(SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).pixel_values
    # verify pixel values
    assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    # verify logits
    with torch.no_grad():
        lowercase__ = model(SCREAMING_SNAKE_CASE_ )
        lowercase__ = outputs.logits
    print("Logits:" , logits[0, :3] )
    print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] )
    lowercase__ = timm_model(SCREAMING_SNAKE_CASE_ )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(SCREAMING_SNAKE_CASE_ , outputs.logits , atol=1e-3 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
        print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
        model.save_pretrained(SCREAMING_SNAKE_CASE_ )
        processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
    if push_to_hub:
        print(f'''Pushing model {model_name} and processor to the hub''' )
        model.push_to_hub(f'''ybelkada/{model_name}''' )
        processor.push_to_hub(f'''ybelkada/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )
    args = parser.parse_args()
    # NOTE(review): the converter defined above carries the mangled name
    # `__lowerCAmelCase` in this file (originally `convert_bit_checkpoint`);
    # call it under the name that actually exists.
    __lowerCAmelCase(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 37
| 0
|
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# Make torch/CUDA ops deterministic so the expected audio slices below are stable.
enable_full_determinism()
class _snake_case(PipelineTesterMixin, unittest.TestCase):
    """Fast (CPU, dummy-weight) tests for `DanceDiffusionPipeline`.

    Fixes: the base class and every local/attribute were bound to mangled names
    (`lowercase__`, `A__`) that broke the `PipelineTesterMixin` hooks; names are
    restored to the identifiers the mixin and `unittest` actually look up.
    """

    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    # NOTE(review): flag names restored from the upstream diffusers test -- confirm.
    test_attention_slicing = False
    test_cpu_offload = False

    def get_dummy_components(self):
        """Build a tiny UNet + scheduler so the pipeline runs in milliseconds."""
        torch.manual_seed(0)
        # NOTE(review): flip_sin_to_cos=True / use_timestep_embedding=False restored
        # from the upstream test; the literals were lost in the transformation -- confirm.
        unet = UNetaDModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=1_6000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()
        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline kwargs; MPS needs a CPU-side generator."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs

    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class _snake_case(unittest.TestCase):
    """Slow GPU integration tests against the harmonai/maestro-150k checkpoint.

    Fixes: locals were bound to `lowercase__` while later lines read the real
    names (NameError); `tearDown` was renamed `A__` so unittest never ran the
    VRAM cleanup; `torch.floataa` does not exist and is restored to float16.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device
        # fp16 variant of the checkpoint; `torch.floataa` was a mangled float16.
        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
| 709
|
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class _snake_case(BaseTokenizer):
    """SentencePiece-style Unigram tokenizer built on the `tokenizers` library.

    Fixes: the `__init__` signature declared every parameter as `__lowercase`
    (a SyntaxError); `self.special_tokens` / `self.special_tokens_list` and the
    tokenizer component assignments were collapsed to throwaway `lowercase__`
    bindings; methods were all named `A__` so the `self.add_unk_id()` call
    could never resolve.
    """

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        # Fixed ids: pad=0, eos=1, unk=2 (unk_id is patched into the model
        # after training, see add_unk_id()).
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }
        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                # NOTE(review): individual_digits=True restored from the upstream
                # template -- the literal was lost in the transformation; confirm.
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
        tokenizer.post_processor = TemplateProcessing(
            single=F'''$A {self.special_tokens["eos"]["token"]}''',
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )
        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }
        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model on one file or a list of files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model from an iterator of texts (or batches of texts)."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)
        self.add_unk_id()

    def add_unk_id(self):
        """Patch the trained Unigram model so <unk> maps to the reserved id."""
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
| 37
| 0
|
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the area between `fnc` and the x-axis on [x_start, x_end]
    using the trapezoidal rule with `steps` equal segments.

    Fixes: the signature repeated the same parameter name four times
    (SyntaxError) and every local was bound to `lowercase__` while the loop
    body read `xa`/`fxa`/`area`; the name is restored to `trapezoidal_area`,
    which the __main__ block below calls.
    """
    xa = x_start
    fxa = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximate each small segment of the curve as linear and solve
        # for the trapezoidal area.
        xa_next = (x_end - x_start) / steps + xa
        fxa_next = fnc(xa_next)
        area += abs(fxa_next + fxa) * (xa_next - xa) / 2
        # Increment step
        xa = xa_next
        fxa = fxa_next
    return area
if __name__ == "__main__":
    # Fix: the demo function was named `__lowerCAmelCase` and the counter
    # bound to `lowercase_`, while the loop below uses `f` and `i`.
    def f(x):
        return x**3 + x**2

    print("""f(x) = x^3 + x^2""")
    print("""The area between the curve, x = -5, x = 5 and the x axis is:""")
    i = 10
    while i <= 10_0000:
        print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}')
        i *= 10
| 710
|
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    """Generate and write a README.md model card for an FSMT wmt19 model.

    Fixes: the signature declared the same parameter three times (SyntaxError)
    and the `texts`/`scores`/`pair`/`readme` bindings were collapsed to
    `lowercase__` while the f-string below reads the real names; `exist_ok`
    is restored to True so re-running over an existing directory works.
    """
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f'''{src_lang}-{tgt_lang}'''

    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f'''Generating {path}''')
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
# Fix: repo_dir / model_cards_dir / src_lang / tgt_lang / model_card_dir were
# all bound to the single name `lowercase_`, so every subsequent use raised
# NameError; names restored to what the call below reads.
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / """model_cards"""

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("""-""")
    model_card_dir = model_cards_dir / """facebook""" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 37
| 0
|
import math
def res(x: float, y: float) -> float:
    """Return y * log10(x), used to compare the magnitudes of x**y values.

    Fixes: the signature declared the same parameter twice (SyntaxError),
    `math.logaa` does not exist (restored to `math.log10`), and the name is
    restored to `res`, which the __main__ block below calls.
    """
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")
if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    # Fix: prompt and all four ints were bound to `lowercase_`, and the
    # comparisons compared a value against itself (`resa > resa`).
    prompt = """Enter the base and the power separated by a comma: """
    xa, ya = map(int, input(prompt).split(""","""))
    xb, yb = map(int, input(prompt).split(""","""))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    resa = res(xa, ya)
    resb = res(xb, yb)
    # We check for the largest number
    if resa > resb:
        print("""Largest number is""", xa, """^""", ya)
    elif resb > resa:
        print("""Largest number is""", xb, """^""", yb)
    else:
        print("""Both are equal""")
| 711
|
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _snake_case(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for `TransfoXLTokenizer`.

    Fixes: undefined base class `lowercase__` restored to the imported
    `TokenizerTesterMixin`; class attributes and method names (`setUp`,
    `get_tokenizer`, ...) restored so unittest and the mixin can find them;
    locals were bound to `lowercase__` while later lines read the real names.
    """

    tokenizer_class = TransfoXLTokenizer
    # NOTE(review): flag names restored per the common-test convention -- confirm.
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)
        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        # lower_case=True: everything is case-folded.
        tokenizer = TransfoXLTokenizer(lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"])

    def test_full_tokenizer_no_lower(self):
        # lower_case=False: original casing is preserved.
        tokenizer = TransfoXLTokenizer(lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)
        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
| 37
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Fix: the logger and the archive map were both bound to the same name
# `lowercase_`, so the first binding was lost; names restored per the
# transformers module convention (NOTE(review): map name inferred -- confirm).
logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """tanreinama/GPTSAN-2.8B-spout_is_uniform""": (
        """https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"""
    ),
}
class _snake_case(PretrainedConfig):
    """Configuration for the GPTSAN-japanese model.

    Fixes: base class restored to the imported `PretrainedConfig`; the
    `__init__` signature declared every parameter as `__lowercase`
    (SyntaxError); `self.` targets were collapsed to `lowercase__`; class
    attributes restored to the names `PretrainedConfig` machinery reads.
    Parameter names are matched to the body's attribute assignments in order.
    """

    model_type = """gptsan-japanese"""
    keys_to_ignore_at_inference = [
        """past_key_values""",
    ]
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }

    def __init__(
        self,
        vocab_size=3_6000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=3_5998,
        pad_token_id=3_5995,
        eos_token_id=3_5999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        # Total depth is the sum of switch and extra layers.
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache
        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
| 712
|
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    """CLI entry point for the TensorFlow benchmark script.

    Fixes: locals were bound to `lowercase__` while later lines read
    `parser`/`benchmark`/`arg_error_msg`/..., the undefined
    `SCREAMING_SNAKE_CASE_` is restored to `TensorFlowBenchmarkArguments`,
    and the function is renamed `main` (the __main__ guard calls it).
    """
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        # Translate the deprecated --no_* flags into a helpful error message.
        # NOTE(review): `eval` on text parsed from the exception is fragile;
        # kept as in the original, but worth replacing with ast.literal_eval.
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
| 37
| 0
|
from __future__ import annotations
import numpy as np
def relu(vector):
    """Element-wise rectified linear unit: max(0, x) for each element.

    Renamed from the mangled `__lowerCAmelCase` because the __main__ block
    below calls `relu`.
    """
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
| 713
|
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
# Fix: all four module constants were bound to the same name `lowercase_`,
# while the ConvertCommand class below reads HIGHLIGHT_MESSAGE_PRE,
# TO_HIGHLIGHT and TO_CONVERT (NameError). HIGHLIGHT_MESSAGE_POST is the
# closing marker appended after a highlighted line.
HIGHLIGHT_MESSAGE_PRE = """<<<<<<< This should probably be modified because it mentions: """

HIGHLIGHT_MESSAGE_POST = """=======
>>>>>>>
"""

# Identifiers whose presence requires a manual follow-up edit.
TO_HIGHLIGHT = [
    """TextEncoderConfig""",
    """ByteTextEncoder""",
    """SubwordTextEncoder""",
    """encoder_config""",
    """maybe_build_from_corpus""",
    """manual_dir""",
]

TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"""tfds\.core""", r"""datasets"""),
    (r"""tf\.io\.gfile\.GFile""", r"""open"""),
    (r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""),
    (r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""),
    (r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""),
    (r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""),
    (r"""tfds\.features\.FeaturesDict\(""", r"""dict("""),
    (r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
    (r"""tfds\.""", r"""datasets."""),
    (r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""),
    (r"""self\.builder_config""", r"""self.config"""),
]
def convert_command_factory(args: Namespace):
    """Build a ConvertCommand from the parsed CLI namespace (set via set_defaults)."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class _snake_case(BaseDatasetsCLICommand):
    """`datasets-cli convert`: rewrite TensorFlow Datasets scripts into
    HuggingFace Datasets scripts.

    Fixes: base class restored to `BaseDatasetsCLICommand`; `__init__`
    declared duplicate `__lowercase` parameters (SyntaxError); `run`'s locals
    were bound to `lowercase__` while later lines read the real names; method
    names restored so the CLI machinery (`register_subcommand`, `run`) works.
    """

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register the `convert` subcommand and its arguments."""
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder.")
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        """Convert every eligible .py file under --tfds_path into --datasets_directory."""
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")
        abs_datasets_path = os.path.abspath(self._datasets_directory)
        self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''')
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]
        for f_name in file_names:
            self._logger.info(F'''Looking at file {f_name}''')
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)
            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue
            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    # Wrap lines needing manual review in conflict-style markers.
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(R"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(F'''Error converting {out_line.strip()}''')
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(F'''Adding directory {output_dir}''')
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)
            if needs_manual_update:
                with_manual_update.append(output_file)
            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(F'''Converted in {output_file}''')
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(F'''Moving {dest_folder} to {utils_file}''')
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(F'''Cannot find destination folder for {utils_file}. Please copy manually.''')
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''')
| 37
| 0
|
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
# Fix: bind the module logger under a descriptive name; the original
# `lowercase_` binding shadowed other module-level constants.
logger = logging.get_logger("""transformers.models.speecht5""")
def load_weights(checkpoint, hf_model, config):
    """Copy HiFi-GAN generator weights from an original `checkpoint` state
    dict into the HF `SpeechTaHifiGan` model under weight-norm parametrization.

    Fixes: the signature declared the same parameter three times (SyntaxError)
    and every destination attribute assignment was collapsed to `lowercase__`,
    so no weight was ever copied.
    NOTE(review): destination attributes (conv_pre/upsampler/resblocks/
    conv_post) restored from the upstream conversion script -- confirm against
    the SpeechTaHifiGan module layout.
    """
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f'''upsamples.{i}.1.weight_g''']
        hf_model.upsampler[i].weight_v.data = checkpoint[f'''upsamples.{i}.1.weight_v''']
        hf_model.upsampler[i].bias.data = checkpoint[f'''upsamples.{i}.1.bias''']

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g''']
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v''']
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f'''blocks.{i}.convs1.{j}.1.bias''']
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g''']
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v''']
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f'''blocks.{i}.convs2.{j}.1.bias''']

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    """Convert an original SpeechT5 HiFi-GAN checkpoint into a HF model.

    Fixes: duplicate parameter names (SyntaxError), locals collapsed to
    `lowercase__`, and the name restored to `convert_hifigan_checkpoint`
    (the __main__ block calls it).
    """
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechTaHifiGanConfig()

    model = SpeechTaHifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    # stats.npy holds (mean, scale) used to de-normalize log-mel inputs.
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    # Fix: the parser/args were bound to `lowercase_` while the code below
    # references `parser` and `args` (NameError).
    parser = argparse.ArgumentParser()
    parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
    parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    parser.add_argument(
        """--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
    )
    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
| 714
|
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

# Fix: all four constants were bound to `lowercase_`, but the tokenizer class
# below reads VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES (NameError).
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
    },
    """merges_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
    },
    """tokenizer_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """allenai/led-base-16384""": 1_6384,
}
class _snake_case ( lowercase__):
UpperCamelCase__ : int =VOCAB_FILES_NAMES
UpperCamelCase__ : Any =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : Dict =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : List[Any] =LEDTokenizer
UpperCamelCase__ : Tuple =["""input_ids""", """attention_mask"""]
def __init__( self : Optional[Any], __lowercase : Optional[Any]=None, __lowercase : Dict=None, __lowercase : Tuple=None, __lowercase : Union[str, Any]="replace", __lowercase : Tuple="<s>", __lowercase : Optional[Any]="</s>", __lowercase : Tuple="</s>", __lowercase : List[str]="<s>", __lowercase : Tuple="<unk>", __lowercase : Dict="<pad>", __lowercase : Dict="<mask>", __lowercase : Any=False, __lowercase : Any=True, **__lowercase : List[Any], ):
super().__init__(
__lowercase, __lowercase, tokenizer_file=__lowercase, errors=__lowercase, bos_token=__lowercase, eos_token=__lowercase, sep_token=__lowercase, cls_token=__lowercase, unk_token=__lowercase, pad_token=__lowercase, mask_token=__lowercase, add_prefix_space=__lowercase, trim_offsets=__lowercase, **__lowercase, )
lowercase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space", __lowercase ) != add_prefix_space:
lowercase__ = getattr(__lowercase, pre_tok_state.pop("type" ) )
lowercase__ = add_prefix_space
lowercase__ = pre_tok_class(**__lowercase )
lowercase__ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase__ = "post_processor"
lowercase__ = getattr(self.backend_tokenizer, __lowercase, __lowercase )
if tokenizer_component_instance:
lowercase__ = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase__ = tuple(state["sep"] )
if "cls" in state:
lowercase__ = tuple(state["cls"] )
lowercase__ = False
if state.get("add_prefix_space", __lowercase ) != add_prefix_space:
lowercase__ = add_prefix_space
lowercase__ = True
if state.get("trim_offsets", __lowercase ) != trim_offsets:
lowercase__ = trim_offsets
lowercase__ = True
if changes_to_apply:
lowercase__ = getattr(__lowercase, state.pop("type" ) )
lowercase__ = component_class(**__lowercase )
setattr(self.backend_tokenizer, __lowercase, __lowercase )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def A__ ( self : str ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def A__ ( self : Optional[int], __lowercase : Dict ):
lowercase__ = AddedToken(__lowercase, lstrip=__lowercase, rstrip=__lowercase ) if isinstance(__lowercase, __lowercase ) else value
lowercase__ = value
def A__ ( self : Any, *__lowercase : List[Any], **__lowercase : Optional[Any] ):
lowercase__ = kwargs.get("is_split_into_words", __lowercase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*__lowercase, **__lowercase )
def A__ ( self : int, *__lowercase : Union[str, Any], **__lowercase : List[str] ):
lowercase__ = kwargs.get("is_split_into_words", __lowercase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._encode_plus(*__lowercase, **__lowercase )
def A__ ( self : Optional[Any], __lowercase : str, __lowercase : Optional[str] = None ):
lowercase__ = self._tokenizer.model.save(__lowercase, name=__lowercase )
return tuple(__lowercase )
def A__ ( self : List[str], __lowercase : int, __lowercase : Optional[int]=None ):
lowercase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def A__ ( self : int, __lowercase : List[int], __lowercase : Optional[List[int]] = None ):
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A__ ( self : Union[str, Any], __lowercase : Union[Dict[str, EncodedInput], BatchEncoding], __lowercase : Optional[int] = None, __lowercase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD, __lowercase : Optional[int] = None, __lowercase : Optional[bool] = None, ):
lowercase__ = super()._pad(
encoded_inputs=__lowercase, max_length=__lowercase, padding_strategy=__lowercase, pad_to_multiple_of=__lowercase, return_attention_mask=__lowercase, )
# Load from model defaults
if return_attention_mask is None:
lowercase__ = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowercase__ = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
lowercase__ = len(encoded_inputs["global_attention_mask"] ) != len(__lowercase )
if needs_to_be_padded:
lowercase__ = len(__lowercase ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowercase__ = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
lowercase__ = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
| 37
| 0
|
from __future__ import annotations
import queue
class _snake_case :
def __init__( self : Optional[Any], __lowercase : Union[str, Any] ):
lowercase__ = data
lowercase__ = None
lowercase__ = None
def build_tree():
    """Interactively build a binary tree in level order from user input.

    Prompts for the root value, then for each discovered node asks for its
    left and right children. Entering ``n`` (or nothing) at any prompt stops
    input and returns the root node.
    """
    print("\n********Press N to stop entering at any point of time********\n" )
    check = input("Enter the value of the root node: " ).strip().lower()
    q = queue.Queue()
    tree_node = TreeNode(int(check ) )
    q.put(tree_node )
    while not q.empty():
        node_found = q.get()
        msg = f'''Enter the left node of {node_found.data}: '''
        check = input(msg ).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check ) )
        node_found.left = left_node
        q.put(left_node )
        msg = f'''Enter the right node of {node_found.data}: '''
        check = input(msg ).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check ) )
        node_found.right = right_node
        q.put(right_node )
    # Unreachable: the loop always returns once the user stops entering nodes.
    raise
def pre_order(node):
    """Print a pre-order (root, left, right) traversal, comma separated.

    The original guard called ``isinstance(node, node)``, which raises
    TypeError for any real node; a plain falsy check restores the intended
    "stop at a missing child" behavior.
    """
    if not node:
        return
    print(node.data, end="," )
    pre_order(node.left )
    pre_order(node.right )
def in_order(node):
    """Print an in-order (left, root, right) traversal, comma separated.

    Replaces the broken ``isinstance(node, node)`` guard with a falsy check.
    """
    if not node:
        return
    in_order(node.left )
    print(node.data, end="," )
    in_order(node.right )
def post_order(node):
    """Print a post-order (left, right, root) traversal, comma separated.

    Replaces the broken ``isinstance(node, node)`` guard with a falsy check.
    """
    if not node:
        return
    post_order(node.left )
    post_order(node.right )
    print(node.data, end="," )
def level_order(node):
    """Print a breadth-first (level-order) traversal, comma separated.

    Uses a FIFO queue; children are enqueued left before right. The broken
    ``isinstance(node, node)`` guard is replaced with a falsy check.
    """
    if not node:
        return
    q = queue.Queue()
    q.put(node )
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end="," )
        if node_dequeued.left:
            q.put(node_dequeued.left )
        if node_dequeued.right:
            q.put(node_dequeued.right )
def level_order_actual(node):
    """Print the tree level by level, one comma-separated line per depth.

    Drains the queue one full level at a time, collecting the next level in a
    list before re-enqueueing it. The broken ``isinstance(node, node)`` guard
    is replaced with a falsy check.
    """
    if not node:
        return
    q = queue.Queue()
    q.put(node )
    while not q.empty():
        next_level = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end="," )
            if node_dequeued.left:
                next_level.append(node_dequeued.left )
            if node_dequeued.right:
                next_level.append(node_dequeued.right )
        print()  # newline terminates the current level
        for child in next_level:
            q.put(child )
def pre_order_iter(node):
    """Iterative pre-order traversal using an explicit stack.

    Replaces the broken ``isinstance(node, node)`` guard with a falsy check.
    """
    if not node:
        return
    stack = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end="," )
            stack.append(n )
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter(node):
    """Iterative in-order traversal using an explicit stack.

    Replaces the broken ``isinstance(node, node)`` guard with a falsy check.
    """
    if not node:
        return
    stack = []
    n = node
    while n or stack:
        while n:  # descend to the leftmost unvisited node
            stack.append(n )
            n = n.left
        n = stack.pop()
        print(n.data, end="," )
        n = n.right
def post_order_iter(node):
    """Iterative post-order traversal using two stacks.

    The first stack produces a reversed post-order; popping the second stack
    then yields the post-order itself. Replaces the broken
    ``isinstance(node, node)`` guard with a falsy check.
    """
    if not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n )
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left )
        if n.right:
            stack1.append(n.right )
        stack2.append(n )
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end="," )
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ = "" , SCREAMING_SNAKE_CASE_=50 , SCREAMING_SNAKE_CASE_="*" ):
if not s:
return "\n" + width * char
lowercase__ , lowercase__ = divmod(width - len(SCREAMING_SNAKE_CASE_ ) - 2 , 2 )
return f'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    print(prompt("Binary Tree Traversals"))

    # The original bound the built tree to a throwaway name but then used
    # `node`, which was never defined.
    node = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")

    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")

    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")

    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
| 715
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    """Entry point of the `accelerate` command-line tool.

    Builds the top-level argument parser, registers all sub-command parsers,
    dispatches to the selected sub-command's `func`, and prints help (exiting
    with status 1) when no sub-command was given.
    """
    # `allow_abbrev=False` so abbreviated flags are not silently accepted;
    # the mangled source passed an undefined name here.
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False )
    subparsers = parser.add_subparsers(help="accelerate command helpers" )

    # Register commands
    get_config_parser(subparsers=subparsers )
    env_command_parser(subparsers=subparsers )
    launch_command_parser(subparsers=subparsers )
    tpu_command_parser(subparsers=subparsers )
    test_command_parser(subparsers=subparsers )

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func" ):
        parser.print_help()
        exit(1 )

    # Run
    args.func(args )


if __name__ == "__main__":
    main()
| 37
| 0
|
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Return True if `phone` is a valid Sri Lankan mobile number.

    Accepted prefixes: ``0``, ``94``, ``+94`` or ``0094``, followed by ``7x``
    with x in {0,1,2,4,5,6,7,8}, an optional separator (dash or space), then
    seven digits.
    """
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" )
    # The original searched the phone number against itself and ignored the
    # compiled pattern entirely.
    return bool(pattern.search(phone ) )


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
| 716
|
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class _snake_case ( unittest.TestCase):
def __init__( self : Dict, __lowercase : int, __lowercase : Union[str, Any]=7, __lowercase : Union[str, Any]=3, __lowercase : Any=18, __lowercase : Union[str, Any]=30, __lowercase : Any=400, __lowercase : List[str]=True, __lowercase : Dict=None, __lowercase : List[str]=True, __lowercase : int=False, __lowercase : Union[str, Any]=True, __lowercase : str=True, __lowercase : Optional[int]=[0.5, 0.5, 0.5], __lowercase : List[Any]=[0.5, 0.5, 0.5], ):
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size if size is not None else {"height": 18, "width": 20}
lowercase__ = do_thumbnail
lowercase__ = do_align_axis
lowercase__ = do_pad
lowercase__ = do_normalize
lowercase__ = image_mean
lowercase__ = image_std
def A__ ( self : Optional[Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for `DonutImageProcessor`: attribute presence, config round-trip,
    and output shapes for PIL / numpy / torch inputs.

    NOTE(review): restored from machine-mangled source in which every method
    was named `A__`, every local `lowercase__`, and the mixin base class name
    was lost; names below follow the usage visible in each method body.
    """

    # Hook read by `ImageProcessingSavingTestMixin` to know what to build.
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        # NOTE(review): the original method name was lost in mangling; the
        # body is a deliberate no-op override.
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 37
| 0
|
from math import asin, atan, cos, radians, sin, sqrt, tan

# WGS-84 ellipsoid constants (metres). The mangled source assigned all three
# to the same name, leaving AXIS_A / AXIS_B / RADIUS below unbound.
AXIS_A = 6378137.0  # semi-major (equatorial) axis
AXIS_B = 6356752.314245  # semi-minor (polar) axis
RADIUS = 637_8137  # sphere radius used by the haversine formula


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Great-circle distance in metres between two (lat, lon) points in degrees.

    Latitudes are first reduced to the WGS-84 ellipsoid (via the flattening),
    then the haversine formula is applied on a sphere of radius ``RADIUS``.
    NOTE(review): the function name was lost in mangling; `haversine_distance`
    matches the formula implemented — confirm against the original project.
    """
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 717
|
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _snake_case(TestCase):
    """Static checks over every dataset script under ``./datasets``:
    `open(...)` calls must pass an encoding (or a binary mode), and no bare
    `print(...)` may appear outside comments/docstrings.

    NOTE(review): the base class was the undefined name `lowercase__` in the
    mangled source; `TestCase` (imported above) is the only base that makes
    the `self.assert*`-free unittest-style methods meaningful.
    """

    def _no_encoding_on_file_open(self, filepath: str):
        r"""Return a match for an `open(...)` call without encoding/binary mode, else None."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_code = input_file.read()
            match = regexp.search(input_code)
        return match

    def _no_print_statements(self, filepath: str):
        r"""Return a match for a real `print(...)` call (ignoring comments/docstrings), else None."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_code = input_file.read()
        # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
        matches = regexp.finditer(input_code)
        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(F'''open(...) must use utf-8 encoding in {dataset}''')

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(F'''print statement found in {dataset}. Use datasets.logger/logging instead.''')
| 37
| 0
|
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _snake_case ( lowercase__ , unittest.TestCase):
    """Fast (CPU, dummy-weights) tests for `DiTPipeline`.

    NOTE(review): identifiers here are machine-mangled. The first base class
    `lowercase__` is undefined at module scope (presumably the
    `PipelineTesterMixin` imported above), and all five class attributes share
    the name `UpperCamelCase__`, so only the last assignment would survive.
    They presumably were: pipeline_class, params, required_optional_params,
    batch_params, plus one boolean feature flag — confirm against the
    upstream diffusers test before fixing. Likewise every local below is
    bound to `lowercase__` while later lines read real names (`transformer`,
    `generator`, `pipe`, ...).
    """

    UpperCamelCase__ : Dict =DiTPipeline
    UpperCamelCase__ : Tuple =CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    UpperCamelCase__ : Tuple =PipelineTesterMixin.required_optional_params - {
        """latents""",
        """num_images_per_prompt""",
        """callback""",
        """callback_steps""",
    }
    UpperCamelCase__ : List[str] =CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    UpperCamelCase__ : int =False

    def A__ ( self : int ):
        """Build tiny dummy pipeline components (2-layer 16x16 DiT, fresh VAE/DDIM)."""
        torch.manual_seed(0 )
        # Tiny transformer so the test runs in seconds on CPU.
        lowercase__ = TransformeraDModel(
            sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4, out_channels=8, attention_bias=__lowercase, activation_fn="gelu-approximate", num_embeds_ada_norm=1000, norm_type="ada_norm_zero", norm_elementwise_affine=__lowercase, )
        lowercase__ = AutoencoderKL()
        lowercase__ = DDIMScheduler()
        # NOTE(review): `transformer`, `vae` and `scheduler` were never bound
        # under those names (everything above went to `lowercase__`).
        lowercase__ = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def A__ ( self : Optional[int], __lowercase : Dict, __lowercase : Optional[int]=0 ):
        """Return deterministic dummy call kwargs (seeded generator, 2 steps)."""
        if str(__lowercase ).startswith("mps" ):
            # MPS does not support device-local generators; seed globally instead.
            lowercase__ = torch.manual_seed(__lowercase )
        else:
            lowercase__ = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
        lowercase__ = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def A__ ( self : List[Any] ):
        """Smoke-test a 2-step CPU inference against a recorded output slice."""
        lowercase__ = "cpu"
        lowercase__ = self.get_dummy_components()
        lowercase__ = self.pipeline_class(**__lowercase )
        pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        lowercase__ = self.get_dummy_inputs(__lowercase )
        lowercase__ = pipe(**__lowercase ).images
        lowercase__ = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 16, 16, 3) )
        # Reference slice recorded from a known-good run.
        lowercase__ = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
        lowercase__ = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(__lowercase, 1e-3 )

    def A__ ( self : Optional[int] ):
        # Batched and single inputs must produce (near-)identical images.
        self._test_inference_batch_single_identical(relax_max_difference=__lowercase, expected_max_diff=1e-3 )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def A__ ( self : List[Any] ):
        # xformers attention must not change the output beyond tolerance.
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class _snake_case ( unittest.TestCase):
    """GPU integration tests running the real facebook/DiT-XL-2 checkpoints
    and comparing against stored reference images.

    NOTE(review): locals here are machine-mangled — later lines read
    `generator`, `pipe`, `words`, `class_ids`, `images`, `expected_image`
    that were only ever assigned to `lowercase__`; restore real names before
    these tests can run.
    """

    def A__ ( self : List[Any] ):
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def A__ ( self : Any ):
        # 256x256 checkpoint, DDIM, 40 steps, four class labels.
        lowercase__ = torch.manual_seed(0 )
        lowercase__ = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
        pipe.to("cuda" )
        lowercase__ = ["vase", "umbrella", "white shark", "white wolf"]
        lowercase__ = pipe.get_label_ids(__lowercase )
        lowercase__ = pipe(__lowercase, generator=__lowercase, num_inference_steps=40, output_type="np" ).images
        for word, image in zip(__lowercase, __lowercase ):
            # Compare against the stored per-class reference image.
            lowercase__ = load_numpy(
                F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
            assert np.abs((expected_image - image).max() ) < 1e-2

    def A__ ( self : Dict ):
        # 512x512 checkpoint with the multistep DPM solver, 25 steps.
        lowercase__ = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" )
        lowercase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.to("cuda" )
        lowercase__ = ["vase", "umbrella"]
        lowercase__ = pipe.get_label_ids(__lowercase )
        lowercase__ = torch.manual_seed(0 )
        lowercase__ = pipe(__lowercase, generator=__lowercase, num_inference_steps=25, output_type="np" ).images
        for word, image in zip(__lowercase, __lowercase ):
            lowercase__ = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                F'''/dit/{word}_512.npy''' )
            assert np.abs((expected_image - image).max() ) < 1e-1
| 718
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for the X-MOD model. The mangled source bound the
# structure dict and the modeling list to throwaway names, so the
# `_import_structure` read at the bottom was never defined, and the final
# `_LazyModule` was never installed into `sys.modules`.
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: expose only the configuration objects.
    pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real symbols.
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 37
| 0
|
from __future__ import annotations
def peak(lst: list[int]) -> int:
    """Return a peak element of `lst` by divide and conquer (O(log n)).

    A peak is an element strictly greater than both neighbors. The input is
    assumed to rise to a single peak and then fall. The mangled source read
    `lst` and `m` that were never bound, and the recursive calls already
    named the function `peak`.

    >>> peak([1, 2, 3, 4, 5, 4, 3, 2, 1])
    5
    """
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 719
|
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowercase_ = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class _snake_case ( unittest.TestCase):
def __init__( self : List[Any], __lowercase : int, __lowercase : Optional[int]=7, __lowercase : List[str]=3, __lowercase : Tuple=18, __lowercase : List[Any]=30, __lowercase : Tuple=400, __lowercase : Any=None, __lowercase : Optional[int]=True, __lowercase : List[str]=True, __lowercase : Union[str, Any]=None, ):
lowercase__ = size if size is not None else {"height": 20, "width": 20}
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = size
lowercase__ = do_normalize
lowercase__ = do_convert_rgb
lowercase__ = [512, 1024, 2048, 4096]
lowercase__ = patch_size if patch_size is not None else {"height": 16, "width": 16}
def A__ ( self : List[str] ):
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def A__ ( self : Any ):
lowercase__ = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
lowercase__ = Image.open(requests.get(__lowercase, stream=__lowercase ).raw ).convert("RGB" )
return raw_image
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""",
)
@require_torch
@require_vision
class PixaStructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for `PixaStructImageProcessor`: attribute presence, the expected
    mean value on a fixed image, and flattened-patch shapes for PIL / numpy /
    torch inputs (with and without VQA header text).

    NOTE(review): restored from machine-mangled source in which every method
    was named `A__` and every local `lowercase__`; names below follow the
    usage visible in each method body.
    """

    # Hook read by `ImageProcessingSavingTestMixin` to know what to build.
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        # Mean recorded from a known-good run on the fixed reference image.
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Flattened patch dim = patch pixels * channels + 2 positional columns.
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        # In VQA mode a header text is mandatory.
        image_processor.is_vqa = True

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input: without header text the call must fail.
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"

            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class _snake_case ( lowercase__ , unittest.TestCase):
    """Pix2Struct image-processor tests with 4-channel (RGBA) inputs.

    `do_convert_rgb` drops the alpha channel, so the expected flattened-patch
    dimension below uses `num_channels - 1`.
    """

    UpperCamelCase__ : Optional[int] =PixaStructImageProcessor if is_vision_available() else None

    def setUp( self ):
        # Renamed from the framework-invisible `A__`: unittest only calls `setUp`,
        # and the tests below read `self.image_processor_tester`.
        self.image_processor_tester = PixaStructImageProcessingTester(self, num_channels=4 )
        # NOTE(review): upstream names this `expected_encoded_image_num_channels`;
        # nothing visible here reads it — confirm against the full test file.
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict( self ):
        # Renamed from `A__`: the tests below read `self.image_processor_dict`.
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_properties( self ):
        # NOTE(review): restored to a `test_`-prefixed name so unittest discovers it.
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor, "do_normalize" ) )
        self.assertTrue(hasattr(image_processor, "do_convert_rgb" ) )

    def test_call_pil( self ):
        # NOTE(review): restored to a `test_`-prefixed name so unittest discovers it.
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image )
        # Alpha channel is dropped by do_convert_rgb, hence num_channels - 1.
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim), )
            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
| 37
| 0
|
'''simple docstring'''
def __lowerCAmelCase(a, b):
    """Return the binary XOR of two non-negative ints as a "0b"-prefixed string.

    >>> __lowerCAmelCase(25, 32)
    '0b111001'
    >>> __lowerCAmelCase(0, 0)
    '0b0'

    Args:
        a: first non-negative integer.
        b: second non-negative integer.

    Returns:
        Binary string of ``a ^ b``, e.g. "0b101".

    Raises:
        ValueError: if either operand is negative.
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = bin(a)[2:]  # remove the leading "0b"
    b_binary = bin(b)[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))

    # Pad both operands to equal width, then XOR the bit characters pairwise.
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 720
|
def depth_first_search(grid, row, col, visit):
    """Count simple paths from (row, col) to the bottom-right cell of *grid*.

    Explores the four cardinal neighbours recursively; cells with value 1 are
    walls, and *visit* prevents revisiting a cell within the current path.
    NOTE(review): renamed from `__lowerCAmelCase` — the recursive calls already
    used `depth_first_search`, so this restores the working, consistent name.

    Args:
        grid: 2-D list of 0/1 ints (0 = open, 1 = blocked).
        row: current row index.
        col: current column index.
        visit: set of (row, col) tuples on the current path; mutated during the
            recursion but restored before returning.

    Returns:
        Number of distinct simple paths to (len(grid)-1, len(grid[0])-1).
    """
    row_length, col_length = len(grid), len(grid[0])
    # Out of bounds, off the end, already on this path, or a wall: dead end.
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))
    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)
    # Backtrack so sibling branches may reuse this cell.
    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 37
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"""
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class _snake_case ( lowercase__):
    """RoFormer model configuration (`model_type = "roformer"`).

    Unknown keyword arguments are forwarded to the base config class.
    NOTE(review): parameter and attribute names restored from the right-hand
    sides of the original assignments (the obfuscated source reused a single
    placeholder for every parameter, which is a SyntaxError).
    """

    UpperCamelCase__ : Tuple ="""roformer"""

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs )
        self.vocab_size = vocab_size
        # Embedding size defaults to hidden_size when not set explicitly.
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class _snake_case ( lowercase__):
    """ONNX export config for RoFormer: declares input names and dynamic axes."""

    @property
    def A__ ( self : Dict ):
        # NOTE(review): obfuscated property name kept (upstream calls it `inputs`);
        # confirm before renaming. A dead duplicate assignment from the original
        # was removed.
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ] )
| 721
|
def __lowerCAmelCase(input_str):
    """Return True if all characters of *input_str* are pairwise distinct.

    Uses an arbitrary-precision integer as a bitmap indexed by Unicode code
    point, so no auxiliary set or dict is allocated.

    >>> __lowerCAmelCase("abc")
    True
    >>> __lowerCAmelCase("aab")
    False
    >>> __lowerCAmelCase("")
    True
    """
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)  # bit mask for this code point
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 37
| 0
|
'''simple docstring'''
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _snake_case ( lowercase__):
    """ConfigTester specialization asserting Levit-specific config attributes."""

    def A__ ( self : str ):
        # NOTE(review): obfuscated method name kept; upstream names this
        # `create_and_test_config_common_properties` — confirm before renaming.
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config, "hidden_sizes" ) )
        self.parent.assertTrue(hasattr(config, "num_attention_heads" ) )
class _snake_case :
    """Builds tiny Levit configs and random inputs, and checks model outputs.

    NOTE(review): attribute and method names restored from the right-hand sides
    of the original assignments and from the call sites in the test class below
    (`prepare_config_and_inputs`, `get_config`, `create_and_check_model`,
    `create_and_check_for_image_classification`,
    `prepare_config_and_inputs_for_common`).
    """

    def __init__( self, parent, batch_size=13, image_size=64, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=16, hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2], initializer_range=0.02, is_training=True, use_labels=True, num_labels=2, ):
        # Mutable list defaults kept for upstream parity; they are never mutated.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        # Two "Subsample" down-sampling stages between the three hidden sizes.
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels

    def prepare_config_and_inputs( self ):
        """Return (config, pixel_values, labels) with freshly sampled tensors."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config( self ):
        """Build a LevitConfig from this tester's hyper-parameters."""
        return LevitConfig(
            image_size=self.image_size, num_channels=self.num_channels, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding, patch_size=self.patch_size, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, depths=self.depths, key_dim=self.key_dim, drop_path_rate=self.drop_path_rate, mlp_ratio=self.mlp_ratio, attention_ratio=self.attention_ratio, initializer_range=self.initializer_range, down_ops=self.down_ops, )

    def create_and_check_model( self, config, pixel_values, labels ):
        """Run LevitModel in eval mode and check the last_hidden_state shape."""
        model = LevitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        # Four convolution stages, each reducing the spatial resolution.
        for _ in range(4 ):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]), )

    def create_and_check_for_image_classification( self, config, pixel_values, labels ):
        """Run LevitForImageClassification with labels and check the logits shape."""
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values, labels=labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )

    def prepare_config_and_inputs_for_common( self ):
        """Adapter for the common ModelTesterMixin: return (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class _snake_case ( lowercase__ , lowercase__ , unittest.TestCase):
UpperCamelCase__ : str =(
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
UpperCamelCase__ : Dict =(
{
"""feature-extraction""": LevitModel,
"""image-classification""": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
UpperCamelCase__ : Optional[Any] =False
UpperCamelCase__ : str =False
UpperCamelCase__ : Any =False
UpperCamelCase__ : str =False
UpperCamelCase__ : List[str] =False
def A__ ( self : Union[str, Any] ):
lowercase__ = LevitModelTester(self )
lowercase__ = ConfigTester(self, config_class=__lowercase, has_text_modality=__lowercase, hidden_size=37 )
def A__ ( self : Optional[int] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self : int ):
return
@unittest.skip(reason="Levit does not use inputs_embeds" )
def A__ ( self : Tuple ):
pass
@unittest.skip(reason="Levit does not support input and output embeddings" )
def A__ ( self : Any ):
pass
@unittest.skip(reason="Levit does not output attentions" )
def A__ ( self : Tuple ):
pass
def A__ ( self : Optional[Any] ):
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(__lowercase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1], __lowercase )
def A__ ( self : int ):
def check_hidden_states_output(__lowercase : str, __lowercase : Tuple, __lowercase : List[Any] ):
lowercase__ = model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(__lowercase, __lowercase ) )
lowercase__ = outputs.hidden_states
lowercase__ = len(self.model_tester.depths ) + 1
self.assertEqual(len(__lowercase ), __lowercase )
lowercase__ = (self.model_tester.image_size, self.model_tester.image_size)
lowercase__ , lowercase__ = image_size[0], image_size[1]
for _ in range(4 ):
lowercase__ = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
lowercase__ = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [
height * width,
self.model_tester.hidden_sizes[0],
], )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = True
check_hidden_states_output(__lowercase, __lowercase, __lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ = True
check_hidden_states_output(__lowercase, __lowercase, __lowercase )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A__ ( self : Optional[int] ):
pass
def A__ ( self : int, __lowercase : List[str], __lowercase : List[str], __lowercase : Dict=False ):
lowercase__ = super()._prepare_for_class(__lowercase, __lowercase, return_labels=__lowercase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def A__ ( self : str ):
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def A__ ( self : Union[str, Any] ):
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
def A__ ( self : Dict ):
if not self.model_tester.is_training:
return
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__lowercase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
lowercase__ = model_class(__lowercase )
model.to(__lowercase )
model.train()
lowercase__ = self._prepare_for_class(__lowercase, __lowercase, return_labels=__lowercase )
lowercase__ = model(**__lowercase ).loss
loss.backward()
def A__ ( self : Optional[int] ):
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase__ = False
lowercase__ = True
for model_class in self.all_model_classes:
if model_class in get_values(__lowercase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
lowercase__ = model_class(__lowercase )
model.gradient_checkpointing_enable()
model.to(__lowercase )
model.train()
lowercase__ = self._prepare_for_class(__lowercase, __lowercase, return_labels=__lowercase )
lowercase__ = model(**__lowercase ).loss
loss.backward()
def A__ ( self : Optional[Any] ):
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__lowercase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type["title"]}''' ):
lowercase__ = problem_type["title"]
lowercase__ = problem_type["num_labels"]
lowercase__ = model_class(__lowercase )
model.to(__lowercase )
model.train()
lowercase__ = self._prepare_for_class(__lowercase, __lowercase, return_labels=__lowercase )
if problem_type["num_labels"] > 1:
lowercase__ = inputs["labels"].unsqueeze(1 ).repeat(1, problem_type["num_labels"] )
lowercase__ = inputs["labels"].to(problem_type["dtype"] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__lowercase ) as warning_list:
lowercase__ = model(**__lowercase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def A__ ( self : Any ):
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = LevitModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def prepare_img():
    """Load the COCO two-cats fixture image used by the slow integration test.

    NOTE(review): renamed from `__lowerCAmelCase` — the integration test below
    already calls `prepare_img()`, so this restores the working name.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class _snake_case ( unittest.TestCase):
    """Slow integration test: checks pretrained Levit logits on a fixture image."""

    @cached_property
    def default_image_processor( self ):
        # Renamed from `A__`: the test below reads `self.default_image_processor`.
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )

    @slow
    def test_inference_image_classification_head( self ):
        # NOTE(review): restored to a `test_`-prefixed name so unittest discovers it.
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape, expected_shape )
        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4 ) )
| 700
|
def exchange_sort(numbers):
    """Sort *numbers* in place (ascending) with exchange sort; return the list.

    NOTE(review): renamed from `__lowerCAmelCase` — the __main__ block already
    calls `exchange_sort`, so this restores the working name.

    Args:
        numbers: mutable list of mutually comparable items; mutated in place.

    Returns:
        The same list object, sorted in ascending order.
    """
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            # Pull the smallest remaining element into position i.
            if numbers[j] < numbers[i]:
                numbers[j], numbers[i] = numbers[i], numbers[j]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
| 37
| 0
|
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
lowercase_ = logging.getLogger()
lowercase_ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _snake_case ( lowercase__):
    """End-to-end smoke tests for finetune_rag on a tiny generated dataset.

    NOTE(review): `_create_dummy_data` and `_run_finetune` restored from their
    call sites below (the obfuscated defs were all named `A__`); parameter
    names restored from the keyword arguments used by the callers.
    """

    def _create_dummy_data( self, data_dir ):
        """Write {train,val,test}.{source,target} files with repeated dummy lines."""
        os.makedirs(data_dir, exist_ok=True )
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split] )
                with open(os.path.join(data_dir, F'''{split}.{field}''' ), "w" ) as f:
                    f.write(content )

    def _run_finetune( self, gpus, distributed_retriever = "pytorch" ):
        """Run finetune_rag as a subprocess on dummy data; return parsed metrics."""
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output" )
        data_dir = os.path.join(tmp_dir, "data" )
        self._create_dummy_data(data_dir=data_dir )
        testargs = F'''
            --data_dir {data_dir} \
            --output_dir {output_dir} \
            --model_name_or_path facebook/rag-sequence-base \
            --model_type rag_sequence \
            --do_train \
            --do_predict \
            --n_val -1 \
            --val_check_interval 1.0 \
            --train_batch_size 2 \
            --eval_batch_size 1 \
            --max_source_length 25 \
            --max_target_length 25 \
            --val_max_target_length 25 \
            --test_max_target_length 25 \
            --label_smoothing 0.1 \
            --dropout 0.1 \
            --attention_dropout 0.1 \
            --weight_decay 0.001 \
            --adam_epsilon 1e-08 \
            --max_grad_norm 0.1 \
            --lr_scheduler polynomial \
            --learning_rate 3e-04 \
            --num_train_epochs 1 \
            --warmup_steps 4 \
            --gradient_accumulation_steps 1 \
            --distributed-port 8787 \
            --use_dummy_dataset 1 \
            --distributed_retriever {distributed_retriever} \
        '''.split()
        if gpus > 0:
            testargs.append(F'''--gpus={gpus}''' )
            # NOTE(review): fp16 nested under gpus > 0 per upstream — confirm.
            if is_apex_available():
                testargs.append("--fp16" )
        else:
            testargs.append("--gpus=0" )
            testargs.append("--distributed_backend=ddp_cpu" )
            testargs.append("--num_processes=2" )
        cmd = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
        execute_subprocess_async(cmd, env=self.get_env() )
        metrics_save_path = os.path.join(output_dir, "metrics.json" )
        with open(metrics_save_path ) as f:
            result = json.load(f )
        return result

    @require_torch_gpu
    def test_finetune_gpu( self ):
        # NOTE(review): restored `test_` prefixes so unittest discovers these; the
        # four obfuscated methods all shared the name `A__` and shadowed each other.
        result = self._run_finetune(gpus=1 )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2 )

    @require_torch_multi_gpu
    def test_finetune_multigpu( self ):
        result = self._run_finetune(gpus=2 )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2 )

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval( self ):
        result = self._run_finetune(gpus=1, distributed_retriever="ray" )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2 )

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval( self ):
        result = self._run_finetune(gpus=1, distributed_retriever="ray" )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2 )
| 701
|
def catalan_numbers(upper_limit):
    """Return the Catalan numbers C(0) .. C(upper_limit) as a list.

    Uses the DP recurrence C(i) = sum(C(j) * C(i - j - 1) for j in range(i)).
    NOTE(review): renamed from `__lowerCAmelCase` — the __main__ block already
    calls `catalan_numbers`, so this restores the working name.

    >>> catalan_numbers(5)
    [1, 1, 2, 5, 14, 42]
    >>> catalan_numbers(0)
    [1]

    Raises:
        ValueError: if upper_limit is negative.
    """
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
| 37
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {}
class _snake_case ( lowercase__):
    """LLaMA model configuration (`model_type = "llama"`).

    NOTE(review): parameter and attribute names restored from the right-hand
    sides of the original assignments (the obfuscated source reused one
    placeholder for every parameter, which is a SyntaxError). The validation
    method is restored to `_rope_scaling_validation`, the name `__init__`
    already calls.
    """

    UpperCamelCase__ : Optional[int] ="""llama"""
    UpperCamelCase__ : str =["""past_key_values"""]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, )

    def _rope_scaling_validation( self ):
        """Validate `self.rope_scaling`: None, or a dict {"type": ..., "factor": ...}.

        Raises:
            ValueError: if the dict is malformed, the type is not
                "linear"/"dynamic", or the factor is not a float > 1.
        """
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict ) or len(self.rope_scaling ) != 2:
            # Message fixed: previously said "with with" and named the wrong
            # fields (`name`/`factor`) — the code reads `type` and `factor`.
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                F'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get("type", None )
        rope_scaling_factor = self.rope_scaling.get("factor", None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float ) or rope_scaling_factor <= 1.0:
            raise ValueError(F'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
| 702
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 37
| 0
|
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    """Builds tiny MPNet configs/inputs and runs per-head shape checks for the suite below.

    NOTE(review): restored from obfuscated code — the original defined every method as
    ``A__`` (so later defs clobbered earlier ones) and never stored constructor args on
    ``self``, while the test class below calls ``MPNetModelTester`` and the
    ``prepare_config_and_inputs`` / ``create_and_check_mpnet_*`` names used here.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # `parent` is the unittest.TestCase, used for its assert helpers.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        """Return the full-size pretrained config (used by generation/large-model tests)."""
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        """Create a tiny config plus random ids/masks/labels sized by the tester fields."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Build a small MPNetConfig from the tester's hyper-parameters."""
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Repeat each example once per choice: (batch, seq) -> (batch, num_choices, seq).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common ModelTesterMixin tests: (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class _snake_case(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test suite for MPNet, driven by ``MPNetModelTester``.

    NOTE(review): restored from obfuscated code — the bases were the undefined name
    ``lowercase__`` twice (a duplicate-base TypeError even if defined), every class
    attribute was named ``UpperCamelCase__`` (so each overwrote the previous), and every
    method was named ``A__`` so unittest never discovered any test.
    """

    # Model heads exercised by the common ModelTesterMixin tests.
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline task -> model class mapping used by PipelineTesterMixin.
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class _snake_case(unittest.TestCase):
    """Slow integration test against the real microsoft/mpnet-base checkpoint."""

    @slow
    def test_inference_no_head(self):
        # NOTE(review): the method was named ``A__`` so unittest never discovered it,
        # and the expected values were the undefined name ``__lowercase``.
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 703
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model, ckpt_dir, model_name):
    """Export a PyTorch ``BertModel`` state dict as a TF1 checkpoint under ``ckpt_dir``.

    Args:
        model: a PyTorch BERT model whose ``state_dict()`` will be exported.
        ckpt_dir: output directory (created if missing) for the ``.ckpt`` files.
        model_name: checkpoint base name; ``-`` is replaced by ``_``.

    NOTE(review): restored from obfuscated code — the original signature repeated the
    same parameter name (a SyntaxError) while the caller below passes
    ``model=``/``ckpt_dir=``/``model_name=`` keywords.
    """
    # Weights stored transposed on the TF side.
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    # Ordered PyTorch-name -> TF-name substitutions (order matters: "." -> "/" runs mid-list).
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name):
        # Apply every substitution, then namespace under "bert/".
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor, name, session):
        # Allocate a TF variable with matching dtype/shape, zero-initialized.
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
def main(raw_args=None):
    """CLI entry point: load a pretrained PyTorch BertModel and export it as a TF checkpoint.

    Args:
        raw_args: optional argv list; ``None`` lets argparse read ``sys.argv``.

    NOTE(review): renamed from an obfuscated identifier — the ``__main__`` guard below
    calls ``main()``. Argparse kwargs (``type``/``required``/``default``) were mangled to
    a single sentinel and are restored to their evident intent.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
# Script entry point: parse CLI flags and run the PyTorch -> TF checkpoint conversion.
if __name__ == "__main__":
    main()
| 37
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Return (old, new) key pairs mapping DINO ViT checkpoint names to HF ViT names.

    Args:
        config: config with ``num_hidden_layers`` (number of encoder blocks to map).
        base_model: when True, target a bare ``ViTModel`` — the ``vit.`` prefix is
            stripped and only the final layernorm (no classifier head) is mapped.

    NOTE(review): renamed from an obfuscated identifier to match the call site in
    ``convert_vit_checkpoint`` (``create_rename_keys(config, base_model=...)``).
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused ``qkv`` projection into separate query/key/value entries in place.

    Args:
        state_dict: checkpoint dict; ``blocks.{i}.attn.qkv.*`` entries are popped and
            replaced by per-projection keys.
        config: provides ``num_hidden_layers`` and ``hidden_size``.
        base_model: when True, target keys have no ``vit.`` prefix (bare ViTModel).

    NOTE(review): restored from obfuscated code — the original assigned every slice to a
    throwaway local instead of writing the new ``state_dict`` keys, and the signature
    repeated one parameter name.
    """
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict;
        # the fused matrix is stacked as [query; key; value] along dim 0.
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drop the classifier head weights from ``state_dict`` in place (trailing underscore
    marks mutation). Missing keys are ignored via ``pop(k, None)``."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place (raises KeyError if ``old`` is absent)."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download the standard COCO cats image used to sanity-check vision conversions.

    NOTE(review): the obfuscated source had ``stream=<sentinel>``; restored to
    ``stream=True`` so ``.raw`` exposes the response body for PIL.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """Convert a facebookresearch/dino ViT checkpoint to the HF ViT format and save it.

    Args:
        model_name: dino hub name (e.g. ``dino_vitb16``, ``dino_vits8``).
        pytorch_dump_folder_path: output directory for model + image processor.
        base_model: convert only the backbone (``ViTModel``) instead of the
            classification model.

    NOTE(review): restored from obfuscated code — config fields were assigned to a
    throwaway local instead of ``config.<field>`` and the signature repeated parameter
    names; real names match the CLI call below.
    """
    config = ViTConfig()
    # patch_size: hub names ending in "8" (dino_vits8 / dino_vitb8) use 8x8 patches
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture: the "small" (vits) variants use a narrower transformer
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        # The bare DINO backbone returns the CLS-token features directly.
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""dino_vitb16""",
type=str,
help="""Name of the model trained with DINO you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--base_model""",
action="""store_true""",
help="""Whether to only convert the base model (no projection head weights).""",
)
parser.set_defaults(base_model=True)
lowercase_ = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 704
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: submodule name -> public names it provides.
# NOTE(review): restored from obfuscated code — the structure was assigned to
# ``lowercase_`` but consumed as ``_import_structure`` by _LazyModule below, the
# torch-only list was never stored into it, and the _LazyModule result was discarded
# instead of being installed in sys.modules.
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only exposed when torch is installed.
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 37
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class _snake_case(PretrainedConfig):
    """Configuration for a Donut Swin encoder (image/patch sizes and transformer dims).

    NOTE(review): restored from obfuscated code — the base class was the undefined name
    ``lowercase__`` (evidently ``PretrainedConfig``, imported at the top of this file),
    both class attributes were named ``UpperCamelCase__`` (the second clobbered the
    first; ``PretrainedConfig`` expects ``model_type`` / ``attribute_map``), the
    ``__init__`` signature repeated one parameter name, and nothing was stored on
    ``self``.
    """

    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
| 705
|
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
# Registry consumed by convert_pt_checkpoint_to_tf below:
# model key -> (config class, TF class(es), PT class(es), pretrained config/archive map(s)).
# NOTE(review): the dict was assigned to ``lowercase_`` but read as ``MODEL_CLASSES``
# by the conversion functions; restored the evident name. Entry contents unchanged.
MODEL_CLASSES = {
    "bart": (
        BartConfig,
        TFBartForConditionalGeneration,
        TFBartForSequenceClassification,
        BartForConditionalGeneration,
        BART_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "bert": (
        BertConfig,
        TFBertForPreTraining,
        BertForPreTraining,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-large-uncased-whole-word-masking-finetuned-squad": (
        BertConfig,
        TFBertForQuestionAnswering,
        BertForQuestionAnswering,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-large-cased-whole-word-masking-finetuned-squad": (
        BertConfig,
        TFBertForQuestionAnswering,
        BertForQuestionAnswering,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-base-cased-finetuned-mrpc": (
        BertConfig,
        TFBertForSequenceClassification,
        BertForSequenceClassification,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "dpr": (
        DPRConfig,
        TFDPRQuestionEncoder,
        TFDPRContextEncoder,
        TFDPRReader,
        DPRQuestionEncoder,
        DPRContextEncoder,
        DPRReader,
        DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "gpt2": (
        GPTaConfig,
        TFGPTaLMHeadModel,
        GPTaLMHeadModel,
        GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlnet": (
        XLNetConfig,
        TFXLNetLMHeadModel,
        XLNetLMHeadModel,
        XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlm": (
        XLMConfig,
        TFXLMWithLMHeadModel,
        XLMWithLMHeadModel,
        XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlm-roberta": (
        XLMRobertaConfig,
        TFXLMRobertaForMaskedLM,
        XLMRobertaForMaskedLM,
        XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "transfo-xl": (
        TransfoXLConfig,
        TFTransfoXLLMHeadModel,
        TransfoXLLMHeadModel,
        TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "openai-gpt": (
        OpenAIGPTConfig,
        TFOpenAIGPTLMHeadModel,
        OpenAIGPTLMHeadModel,
        OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "roberta": (
        RobertaConfig,
        TFRobertaForCausalLM,
        TFRobertaForMaskedLM,
        RobertaForMaskedLM,
        ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "layoutlm": (
        LayoutLMConfig,
        TFLayoutLMForMaskedLM,
        LayoutLMForMaskedLM,
        LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "roberta-large-mnli": (
        RobertaConfig,
        TFRobertaForSequenceClassification,
        RobertaForSequenceClassification,
        ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "camembert": (
        CamembertConfig,
        TFCamembertForMaskedLM,
        CamembertForMaskedLM,
        CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "flaubert": (
        FlaubertConfig,
        TFFlaubertWithLMHeadModel,
        FlaubertWithLMHeadModel,
        FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "distilbert": (
        DistilBertConfig,
        TFDistilBertForMaskedLM,
        DistilBertForMaskedLM,
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "distilbert-base-distilled-squad": (
        DistilBertConfig,
        TFDistilBertForQuestionAnswering,
        DistilBertForQuestionAnswering,
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "lxmert": (
        LxmertConfig,
        TFLxmertForPreTraining,
        LxmertForPreTraining,
        LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "lxmert-visual-feature-encoder": (
        LxmertConfig,
        TFLxmertVisualFeatureEncoder,
        LxmertVisualFeatureEncoder,
        LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "ctrl": (
        CTRLConfig,
        TFCTRLLMHeadModel,
        CTRLLMHeadModel,
        CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "albert": (
        AlbertConfig,
        TFAlbertForPreTraining,
        AlbertForPreTraining,
        ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "t5": (
        TaConfig,
        TFTaForConditionalGeneration,
        TaForConditionalGeneration,
        T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "electra": (
        ElectraConfig,
        TFElectraForPreTraining,
        ElectraForPreTraining,
        ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "wav2vec2": (
        WavaVecaConfig,
        TFWavaVecaModel,
        WavaVecaModel,
        WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    """Build a TF model from ``config_file``, load the PyTorch weights into it, optionally
    compare outputs against the PyTorch model, and save TF weights to ``tf_dump_path``.

    Args:
        model_type: key into MODEL_CLASSES.
        pytorch_checkpoint_path: local path or hub shortcut of the .bin checkpoint.
        config_file: local path or hub shortcut of the JSON config.
        tf_dump_path: output path for the .h5 weights.
        compare_with_pt_model: also run both models on dummy inputs and assert closeness.
        use_cached_models: reuse previously downloaded files instead of re-downloading.

    NOTE(review): restored from obfuscated code (all params shared one sentinel name);
    ``cached_file`` second args restored to CONFIG_NAME / WEIGHTS_NAME and the two
    boolean flags to ``config.output_hidden_states`` / ``config.output_attentions``.
    """
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    # NOTE(review): 4-way unpack kept from the original; assumes this entry is a
    # 4-tuple (config, TF class, PT class, config map) — verify for multi-head entries.
    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    """Convert a batch of PyTorch checkpoints to TF 2.0 weight files.

    Args:
        args_model_type: model type to convert, or None to convert every type
            in ``MODEL_CLASSES``.
        tf_dump_path: directory in which the ``*-tf_model.h5`` files are written.
        model_shortcut_names_or_path: checkpoints to convert; defaults to all
            AWS shortcut names for the model type.
        config_shortcut_names_or_path: matching config shortcuts; defaults to
            the model shortcut names.
        compare_with_pt_model: forwarded to ``convert_pt_checkpoint_to_tf``.
        use_cached_models: if False, force re-download of cached shortcut files.
        remove_cached_files: delete the downloaded config/model files afterwards.
        only_convert_finetuned_models: convert only -squad/-mrpc/-mnli checkpoints.

    NOTE(review): parameter and local names reconstructed from the ``args.*``
    call at module bottom and the upstream transformers converter — confirm.
    """
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                # Finetuned checkpoints carry their own model type in the name.
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(aws_config_map)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            # Local file paths get a generic dump name instead of the path itself.
            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    # CLI entry point for the PT -> TF 2.0 conversion.
    # Fix: the dump assigned the parser and parsed args to a mangled name
    # (`lowercase_`) while the following lines use `parser` / `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        help=(
            f"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and "
            "convert all the models from AWS."
        ),
    )
    parser.add_argument(
        "--pytorch_checkpoint_path",
        default=None,
        type=str,
        help=(
            "Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
            "If not given, will download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        help=(
            "The config json file corresponding to the pre-trained model. \n"
            "This specifies the model architecture. If not given and "
            "--pytorch_checkpoint_path is not given or is a shortcut name "
            "use the configuration associated to the shortcut name on the AWS"
        ),
    )
    parser.add_argument(
        "--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
    )
    parser.add_argument(
        "--use_cached_models",
        action="store_true",
        help="Use cached models if possible instead of updating to latest checkpoint versions.",
    )
    parser.add_argument(
        "--remove_cached_files",
        action="store_true",
        help="Remove pytorch models after conversion (save memory when converting in batches).",
    )
    parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
    args = parser.parse_args()

    # if args.pytorch_checkpoint_path is not None:
    #     convert_pt_checkpoint_to_tf(args.model_type.lower(),
    #                                 args.pytorch_checkpoint_path,
    #                                 args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
    #                                 args.tf_dump_path,
    #                                 compare_with_pt_model=args.compare_with_pt_model,
    #                                 use_cached_models=args.use_cached_models)
    # else:
    convert_all_pt_checkpoints_to_tf(
        args.model_type.lower() if args.model_type is not None else None,
        args.tf_dump_path,
        model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
        if args.pytorch_checkpoint_path is not None
        else None,
        config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
        compare_with_pt_model=args.compare_with_pt_model,
        use_cached_models=args.use_cached_models,
        remove_cached_files=args.remove_cached_files,
        only_convert_finetuned_models=args.only_convert_finetuned_models,
    )
| 37
| 0
|
"""Open the top Google results for the command-line query in a web browser."""
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup  # fix: the dump imported the nonexistent package `bsa`
from fake_useragent import UserAgent

if __name__ == "__main__":
    # Fix: the dump assigned url/res/soup/links to a single mangled name
    # (`lowercase_`) while later lines referenced the real names (NameError).
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    # Randomized User-Agent reduces the chance of Google blocking the request.
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    # `.eZt8xd` is the CSS class Google uses for result links; keep only 5.
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
| 706
|
import math
def res(x: int, y: int) -> float:
    """Return ``y * log10(x)``, a comparable magnitude proxy for ``x ** y``.

    Since log10(x^y) = y*log10(x), comparing these values compares the powers
    without computing huge integers.

    Fixes vs the dump: duplicate parameter names (SyntaxError), the
    nonexistent ``math.logaa`` (should be ``math.log10``), and the function
    name (the ``__main__`` block below calls it as ``res``).

    Returns 0 when x == 0 (0**y == 0) and 1 when y == 0 (x**0 == 1).
    """
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")
if __name__ == "__main__":  # Main function
    # Read two (base, power) pairs from input and report which power is larger.
    # Fix vs the dump: both pairs were unpacked into the same mangled name and
    # the undefined names xa/ya/resa were used afterwards (NameError).
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)
    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
| 37
| 0
|
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    """Return a representative linear layer from the model's first transformer block.

    Fixes vs the dump: the body referenced ``model`` while the parameter was a
    mangled name (NameError), and the test below calls this function as
    ``get_some_linear_layer`` (L7203), so that name is restored.

    NOTE(review): ``dense_ah_to_h`` looks like a digit-mangled ``dense_4h_to_h``
    (the BLOOM MLP attribute) — kept as-is for consistency with the rest of the
    dump; confirm against upstream before relying on it.
    """
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wrap a linear module with a LoRA-style low-rank adapter (testing only).

    The wrapped module's output is augmented with ``adapter(x)``, a rank-``rank``
    bottleneck whose second matrix is zero-initialized, so the wrapper is an
    exact no-op until trained.

    Fixes vs the dump: duplicate ``__lowercase`` parameters (SyntaxError),
    dropped ``self.`` on attribute assignments, and the forward method name.
    Names reconstructed from the call sites in this file
    (``LoRALayer(module.q_proj, rank=16)``, ``module.adapter[1].weight.grad``).
    """

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        # in_features -> rank -> out_features, no biases.
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        # Zero second matrix => adapter output starts at exactly zero.
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _snake_case ( unittest.TestCase):
    """Base class holding constants and the tokenizer for the 4-bit GPU tests.

    NOTE(review): names here are machine-mangled. The repeated
    ``UpperCamelCase__`` class attributes were apparently distinct constants
    (``model_name``, ``EXPECTED_RELATIVE_DIFFERENCE``, ``input_text``,
    ``EXPECTED_OUTPUTS``, ``MAX_NEW_TOKENS``), so ``EXPECTED_OUTPUTS.add`` and
    ``self.model_name`` below do not resolve as written — confirm against the
    upstream transformers test file before running.
    """

    # We keep the constants inside the init function and model loading inside setUp function
    # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
    # Therefore here we use only bloom-1b3 to test our module
    UpperCamelCase__ : int ="""bigscience/bloom-1b7"""
    # Constant values
    UpperCamelCase__ : List[str] =2.109_659_552_692_574
    UpperCamelCase__ : str ="""Hello my name is"""
    UpperCamelCase__ : List[Any] =set()
    EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. I""")
    EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""")
    EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""")
    UpperCamelCase__ : Optional[Any] =1_0

    def A__ ( self : Optional[int] ):
        # Original setUp: load the tokenizer once per test.
        # Models and tokenizer
        lowercase__ = AutoTokenizer.from_pretrained(self.model_name )
class _snake_case ( lowercase__):
    """Core 4-bit quantization tests (memory footprint, generation, config round-trip).

    NOTE(review): machine-mangled. The base class name ``lowercase__`` is
    unresolved, every method is named ``A__`` (later defs shadow earlier ones),
    ``lowercase__ = ...`` lines were distinct local/attribute assignments
    (e.g. ``self.model_fp16`` / ``self.model_4bit`` in setUp), and ``__lowercase``
    stands for various values (often ``True``/``False``). Requires CUDA,
    bitsandbytes and accelerate; kept byte-identical — confirm against upstream.
    """

    # setUp: load an fp16 reference model and a 4-bit quantized model.
    def A__ ( self : Union[str, Any] ):
        super().setUp()
        # Models and tokenizer
        lowercase__ = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.floataa, device_map="auto" )
        lowercase__ = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=__lowercase, device_map="auto" )

    # tearDown: free GPU memory between tests.
    def A__ ( self : Union[str, Any] ):
        del self.model_fpaa
        del self.model_abit
        gc.collect()
        torch.cuda.empty_cache()

    # quantization_config should survive to_dict/to_diff_dict/to_json_string.
    def A__ ( self : int ):
        lowercase__ = self.model_abit.config
        self.assertTrue(hasattr(__lowercase, "quantization_config" ) )
        lowercase__ = config.to_dict()
        lowercase__ = config.to_diff_dict()
        lowercase__ = config.to_json_string()

    # 4-bit model should be smaller by the expected relative factor.
    def A__ ( self : List[str] ):
        from bitsandbytes.nn import Paramsabit
        lowercase__ = self.model_fpaa.get_memory_footprint()
        lowercase__ = self.model_abit.get_memory_footprint()
        self.assertAlmostEqual(mem_fpaa / mem_abit, self.EXPECTED_RELATIVE_DIFFERENCE )
        lowercase__ = get_some_linear_layer(self.model_abit )
        self.assertTrue(linear.weight.__class__ == Paramsabit )

    # Linear weights (except kept-in-fp32 modules) should be packed uint8.
    def A__ ( self : Optional[Any] ):
        from transformers import TaPreTrainedModel
        self.model_fpaa.get_memory_footprint()
        self.model_abit.get_memory_footprint()
        for name, module in self.model_abit.named_modules():
            if isinstance(__lowercase, torch.nn.Linear ):
                if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uinta )

    # Basic generation from the quantized model.
    def A__ ( self : Optional[int] ):
        lowercase__ = self.tokenizer(self.input_text, return_tensors="pt" )
        lowercase__ = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ), max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=__lowercase ), self.EXPECTED_OUTPUTS )

    # Generation when quantization is requested via an explicit BitsAndBytesConfig.
    def A__ ( self : str ):
        lowercase__ = BitsAndBytesConfig()
        lowercase__ = True
        lowercase__ = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=__lowercase, device_map="auto" )
        lowercase__ = self.tokenizer(self.input_text, return_tensors="pt" )
        lowercase__ = model_abit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0 ), max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=__lowercase ), self.EXPECTED_OUTPUTS )

    # Saving a 4-bit model should raise.
    def A__ ( self : int ):
        with self.assertRaises(__lowercase ), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(__lowercase )

    # Conflicting quantization arguments should raise.
    def A__ ( self : str ):
        lowercase__ = BitsAndBytesConfig()
        with self.assertRaises(__lowercase ):
            lowercase__ = AutoModelForCausalLM.from_pretrained(
                self.model_name, quantization_config=__lowercase, load_in_abit=__lowercase, device_map="auto", bnb_abit_quant_type="nf4", )

    # Device/dtype casts on a quantized model must raise; fp16 model stays castable.
    def A__ ( self : Tuple ):
        with self.assertRaises(__lowercase ):
            # Tries with `str`
            self.model_abit.to("cpu" )
        with self.assertRaises(__lowercase ):
            # Tries with a `dtype``
            self.model_abit.to(torch.floataa )
        with self.assertRaises(__lowercase ):
            # Tries with a `device`
            self.model_abit.to(torch.device("cuda:0" ) )
        with self.assertRaises(__lowercase ):
            # Tries with a `device`
            self.model_abit.float()
        with self.assertRaises(__lowercase ):
            # Tries with a `device`
            self.model_abit.half()
        # Test if we did not break anything
        lowercase__ = self.tokenizer(self.input_text, return_tensors="pt" )
        lowercase__ = self.model_fpaa.to(torch.floataa )
        lowercase__ = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ), max_new_tokens=10 )
        # Check this does not throw an error
        lowercase__ = self.model_fpaa.to("cpu" )
        # Check this does not throw an error
        lowercase__ = self.model_fpaa.half()
        # Check this does not throw an error
        lowercase__ = self.model_fpaa.float()

    # T5 keeps its kept-in-fp32 module (wo) unquantized.
    def A__ ( self : Any ):
        lowercase__ = AutoModelForSeqaSeqLM.from_pretrained("t5-small", load_in_abit=__lowercase, device_map="auto" )
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _snake_case ( unittest.TestCase):
    """4-bit tests for T5 / flan-T5 (dense-relu-dense vs dense-act variants).

    NOTE(review): machine-mangled (``A__`` methods, ``lowercase__`` targets,
    ``TaForConditionalGeneration`` was ``T5ForConditionalGeneration``,
    ``load_in_abit`` was ``load_in_4bit``). Requires CUDA — confirm against
    the upstream transformers test file.
    """

    @classmethod
    def A__ ( cls : Union[str, Any] ):
        # Original setUpClass: store model names, tokenizer and prompt on the class.
        lowercase__ = "t5-small"
        lowercase__ = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        lowercase__ = AutoTokenizer.from_pretrained(cls.model_name )
        lowercase__ = "Translate in German: Hello, my dog is cute"

    # tearDown: free GPU memory between tests.
    def A__ ( self : List[str] ):
        gc.collect()
        torch.cuda.empty_cache()

    # Load/generate with the kept-in-fp32 module list temporarily cleared.
    def A__ ( self : Dict ):
        from transformers import TaForConditionalGeneration
        lowercase__ = TaForConditionalGeneration._keep_in_fpaa_modules
        lowercase__ = None
        # test with `t5-small`
        lowercase__ = TaForConditionalGeneration.from_pretrained(self.model_name, load_in_abit=__lowercase, device_map="auto" )
        lowercase__ = self.tokenizer(self.input_text, return_tensors="pt" ).to(0 )
        lowercase__ = model.generate(**__lowercase )
        # test with `flan-t5-small`
        lowercase__ = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_abit=__lowercase, device_map="auto" )
        lowercase__ = self.tokenizer(self.input_text, return_tensors="pt" ).to(0 )
        lowercase__ = model.generate(**__lowercase )
        lowercase__ = modules

    # Standard 4-bit load: decoder attention projections become bnb 4-bit linears.
    def A__ ( self : int ):
        import bitsandbytes as bnb
        from transformers import TaForConditionalGeneration
        # test with `t5-small`
        lowercase__ = TaForConditionalGeneration.from_pretrained(self.model_name, load_in_abit=__lowercase, device_map="auto" )
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linearabit ) )
        lowercase__ = self.tokenizer(self.input_text, return_tensors="pt" ).to(0 )
        lowercase__ = model.generate(**__lowercase )
        # test with `flan-t5-small`
        lowercase__ = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_abit=__lowercase, device_map="auto" )
        lowercase__ = self.tokenizer(self.input_text, return_tensors="pt" ).to(0 )
        lowercase__ = model.generate(**__lowercase )
class _snake_case ( lowercase__):
    """4-bit loading across AutoModel heads (base, seq-cls, causal-LM, seq2seq).

    NOTE(review): machine-mangled; ``lowercase__`` setUp targets were
    apparently ``self.base_model`` / ``self.sequence_model`` /
    ``self.model_abit`` / ``self.seq_to_seq_model`` (see tearDown).
    Requires CUDA + bitsandbytes — confirm against upstream.
    """

    def A__ ( self : Tuple ):
        super().setUp()
        # model_name
        lowercase__ = "bigscience/bloom-560m"
        lowercase__ = "t5-small"
        # Different types of model
        lowercase__ = AutoModel.from_pretrained(self.model_name, load_in_abit=__lowercase, device_map="auto" )
        # Sequence classification model
        lowercase__ = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_abit=__lowercase, device_map="auto" )
        # CausalLM model
        lowercase__ = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=__lowercase, device_map="auto" )
        # Seq2seq model
        lowercase__ = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_abit=__lowercase, device_map="auto" )

    # tearDown: drop all loaded models and free GPU memory.
    def A__ ( self : List[Any] ):
        del self.base_model
        del self.sequence_model
        del self.model_abit
        del self.seq_to_seq_model
        gc.collect()
        torch.cuda.empty_cache()

    # MLP weights quantized; task heads stay plain nn.Parameter.
    def A__ ( self : Optional[int] ):
        from bitsandbytes.nn import Paramsabit
        self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
        # Other heads should be nn.Parameter
        self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class _snake_case ( lowercase__):
    """4-bit loading through the ``pipeline`` API.

    NOTE(review): machine-mangled; ``lowercase__`` in the last method was
    apparently ``self.pipe`` / ``pipeline_output`` (see tearDown and the
    final assertion). Requires CUDA — confirm against upstream.
    """

    def A__ ( self : Tuple ):
        super().setUp()

    # tearDown: drop the pipeline and free GPU memory.
    def A__ ( self : List[Any] ):
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()

    # Build a 4-bit text-generation pipeline and check its output.
    def A__ ( self : Union[str, Any] ):
        lowercase__ = pipeline(
            "text-generation", model=self.model_name, model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa}, max_new_tokens=self.MAX_NEW_TOKENS, )
        # Real second forward pass
        lowercase__ = self.pipe(self.input_text )
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class _snake_case ( lowercase__):
    """4-bit loading with ``device_map='balanced'`` across two GPUs.

    NOTE(review): machine-mangled; ``lowercase__`` targets were apparently
    ``model_parallel`` / ``encoded_input`` / ``output_parallel`` (see the
    assertions). Requires >= 2 CUDA devices — confirm against upstream.
    """

    def A__ ( self : Any ):
        super().setUp()

    def A__ ( self : str ):
        lowercase__ = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_abit=__lowercase, device_map="balanced" )
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values() ), {0, 1} )
        # Check that inference pass works on the model
        lowercase__ = self.tokenizer(self.input_text, return_tensors="pt" )
        # Second real batch
        lowercase__ = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ), max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=__lowercase ), self.EXPECTED_OUTPUTS )
class _snake_case ( lowercase__):
    """Adapter (LoRA) training on top of a frozen 4-bit model.

    NOTE(review): machine-mangled; ``lowercase__`` assignment targets were
    apparently ``self.model_name``, ``model``, ``param.requires_grad``,
    ``param.data``, ``module.q_proj/k_proj/v_proj``, ``batch`` and ``out``
    (see the surrounding uses). Requires CUDA and bitsandbytes >= 0.37 —
    confirm against upstream.
    """

    def A__ ( self : Optional[Any] ):
        lowercase__ = "facebook/opt-350m"
        super().setUp()

    def A__ ( self : Dict ):
        # Adapter training needs bitsandbytes >= 0.37.0; skip silently otherwise.
        if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ):
            return
        # Step 1: freeze all parameters
        lowercase__ = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=__lowercase )
        self.assertEqual(set(model.hf_device_map.values() ), {torch.cuda.current_device()} )
        for param in model.parameters():
            lowercase__ = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                lowercase__ = param.data.to(torch.floataa )
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(__lowercase ) ):
                lowercase__ = LoRALayer(module.q_proj, rank=16 )
                lowercase__ = LoRALayer(module.k_proj, rank=16 )
                lowercase__ = LoRALayer(module.v_proj, rank=16 )
        # Step 3: dummy batch
        lowercase__ = self.tokenizer("Test batch ", return_tensors="pt" ).to(0 )
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            lowercase__ = model.forward(**__lowercase )
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(__lowercase, __lowercase ):
                self.assertTrue(module.adapter[1].weight.grad is not None )
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
            elif isinstance(__lowercase, nn.Embedding ):
                self.assertTrue(module.weight.grad is None )
class _snake_case ( lowercase__):
    """GPT-2 XL variant of the 4-bit test suite.

    NOTE(review): the two mangled ``UpperCamelCase__`` attributes apparently
    override the base class's ``model_name`` and
    ``EXPECTED_RELATIVE_DIFFERENCE`` constants — confirm against upstream.
    """
    UpperCamelCase__ : Union[str, Any] ="""gpt2-xl"""
    UpperCamelCase__ : Any =3.3_191_854_854_152_187
| 707
|
import pickle
import numpy as np
from matplotlib import pyplot as plt
class _snake_case :
    """A small CNN (one conv layer + pooling + two-layer BP network) in pure numpy.

    NOTE(review): identifiers are machine-mangled. Repeated ``lowercase__ = ...``
    lines were originally distinct assignments (mostly ``self.*`` attributes such
    as ``self.num_bp1..3``, ``self.conv1``, ``self.w_conv1``, ``self.wkj``,
    ``self.vji``, ``self.thre_*``), and the methods named ``A__`` were distinct
    methods (save_model, read_model, sig, do_round, convolute, pooling, _expand,
    _expand_mat, _calculate_gradient_from_pool, train, predict, convolution).
    As written the code does not run; comments describe apparent intent only —
    confirm against the upstream "convolution_neural_network" source.
    """

    def __init__( self : Tuple, __lowercase : Union[str, Any], __lowercase : int, __lowercase : Union[str, Any], __lowercase : str, __lowercase : List[Any], __lowercase : List[str]=0.2, __lowercase : List[str]=0.2 ):
        # Apparent params: conv1 shape/count, conv step, pooling size,
        # three BP layer sizes, learning rates for weights and thresholds.
        lowercase__ = bp_numa
        lowercase__ = bp_numa
        lowercase__ = bp_numa
        lowercase__ = conva_get[:2]
        lowercase__ = conva_get[2]
        lowercase__ = size_pa
        lowercase__ = rate_w
        lowercase__ = rate_t
        # Random initial conv kernels in [-0.5, 0.5).
        lowercase__ = [
            np.mat(-1 * np.random.rand(self.conva[0], self.conva[0] ) + 0.5 )
            for i in range(self.conva[1] )
        ]
        lowercase__ = np.mat(-1 * np.random.rand(self.num_bpa, self.num_bpa ) + 0.5 )
        lowercase__ = np.mat(-1 * np.random.rand(self.num_bpa, self.num_bpa ) + 0.5 )
        # Random initial thresholds in [-1, 1).
        lowercase__ = -2 * np.random.rand(self.conva[1] ) + 1
        lowercase__ = -2 * np.random.rand(self.num_bpa ) + 1
        lowercase__ = -2 * np.random.rand(self.num_bpa ) + 1

    def A__ ( self : Any, __lowercase : List[str] ):
        # save model dict with pickle
        lowercase__ = {
            "num_bp1": self.num_bpa,
            "num_bp2": self.num_bpa,
            "num_bp3": self.num_bpa,
            "conv1": self.conva,
            "step_conv1": self.step_conva,
            "size_pooling1": self.size_poolinga,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conva,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conva,
            "thre_bp2": self.thre_bpa,
            "thre_bp3": self.thre_bpa,
        }
        with open(__lowercase, "wb" ) as f:
            pickle.dump(__lowercase, __lowercase )
        print(F'''Model saved: {save_path}''' )

    @classmethod
    def A__ ( cls : Dict, __lowercase : Union[str, Any] ):
        # read saved model
        with open(__lowercase, "rb" ) as f:
            lowercase__ = pickle.load(__lowercase )  # noqa: S301
        lowercase__ = model_dic.get("conv1" )
        conv_get.append(model_dic.get("step_conv1" ) )
        lowercase__ = model_dic.get("size_pooling1" )
        lowercase__ = model_dic.get("num_bp1" )
        lowercase__ = model_dic.get("num_bp2" )
        lowercase__ = model_dic.get("num_bp3" )
        lowercase__ = model_dic.get("rate_weight" )
        lowercase__ = model_dic.get("rate_thre" )
        # create model instance
        lowercase__ = CNN(__lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase )
        # modify model parameter
        lowercase__ = model_dic.get("w_conv1" )
        lowercase__ = model_dic.get("wkj" )
        lowercase__ = model_dic.get("vji" )
        lowercase__ = model_dic.get("thre_conv1" )
        lowercase__ = model_dic.get("thre_bp2" )
        lowercase__ = model_dic.get("thre_bp3" )
        return conv_ins

    def A__ ( self : str, __lowercase : List[Any] ):
        # Sigmoid activation (original name apparently `sig`).
        return 1 / (1 + np.exp(-1 * x ))

    def A__ ( self : List[str], __lowercase : Optional[Any] ):
        # Round to three decimals (original name apparently `do_round`).
        return round(__lowercase, 3 )

    def A__ ( self : Optional[Any], __lowercase : Dict, __lowercase : Optional[int], __lowercase : Optional[int], __lowercase : Optional[Any], __lowercase : str ):
        # convolution process
        lowercase__ = convs[0]
        lowercase__ = convs[1]
        lowercase__ = np.shape(__lowercase )[0]
        # get the data slice of original image data, data_focus
        lowercase__ = []
        for i_focus in range(0, size_data - size_conv + 1, __lowercase ):
            for j_focus in range(0, size_data - size_conv + 1, __lowercase ):
                lowercase__ = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(__lowercase )
        # calculate the feature map of every single kernel, and saved as list of matrix
        lowercase__ = []
        lowercase__ = int((size_data - size_conv) / conv_step + 1 )
        for i_map in range(__lowercase ):
            lowercase__ = []
            for i_focus in range(len(__lowercase ) ):
                lowercase__ = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map] ) )
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(__lowercase ) )
            lowercase__ = np.asmatrix(__lowercase ).reshape(
                __lowercase, __lowercase )
            data_featuremap.append(__lowercase )
        # expanding the data slice to One dimenssion
        lowercase__ = []
        for each_focus in data_focus:
            focusa_list.extend(self.Expand_Mat(__lowercase ) )
        lowercase__ = np.asarray(__lowercase )
        return focus_list, data_featuremap

    def A__ ( self : List[Any], __lowercase : Any, __lowercase : List[Any], __lowercase : Union[str, Any]="average_pool" ):
        # pooling process
        lowercase__ = len(featuremaps[0] )
        lowercase__ = int(size_map / size_pooling )
        lowercase__ = []
        for i_map in range(len(__lowercase ) ):
            lowercase__ = featuremaps[i_map]
            lowercase__ = []
            for i_focus in range(0, __lowercase, __lowercase ):
                for j_focus in range(0, __lowercase, __lowercase ):
                    lowercase__ = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(__lowercase ) )
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(__lowercase ) )
            lowercase__ = np.asmatrix(__lowercase ).reshape(__lowercase, __lowercase )
            featuremap_pooled.append(__lowercase )
        return featuremap_pooled

    def A__ ( self : str, __lowercase : Optional[Any] ):
        # expanding three dimension data to one dimension list
        lowercase__ = []
        for i in range(len(__lowercase ) ):
            lowercase__ = np.shape(data[i] )
            lowercase__ = data[i].reshape(1, shapes[0] * shapes[1] )
            lowercase__ = data_listed.getA().tolist()[0]
            data_expanded.extend(__lowercase )
        lowercase__ = np.asarray(__lowercase )
        return data_expanded

    def A__ ( self : Optional[int], __lowercase : Optional[int] ):
        # expanding matrix to one dimension list
        lowercase__ = np.asarray(__lowercase )
        lowercase__ = np.shape(__lowercase )
        lowercase__ = data_mat.reshape(1, shapes[0] * shapes[1] )
        return data_expanded

    def A__ ( self : str, __lowercase : Tuple, __lowercase : List[Any], __lowercase : Any, __lowercase : Union[str, Any], __lowercase : Tuple ):
        # Backpropagate pooled gradients onto the pre-pooling feature maps
        # (original name apparently `_calculate_gradient_from_pool`).
        lowercase__ = []
        lowercase__ = 0
        for i_map in range(__lowercase ):
            lowercase__ = np.ones((size_map, size_map) )
            for i in range(0, __lowercase, __lowercase ):
                for j in range(0, __lowercase, __lowercase ):
                    lowercase__ = pd_pool[
                        i_pool
                    ]
                    lowercase__ = i_pool + 1
            lowercase__ = np.multiply(
                __lowercase, np.multiply(out_map[i_map], (1 - out_map[i_map]) ) )
            pd_all.append(__lowercase )
        return pd_all

    def A__ ( self : Tuple, __lowercase : int, __lowercase : Optional[Any], __lowercase : List[Any], __lowercase : Optional[Any], __lowercase : List[Any], __lowercase : List[str]=bool ):
        # model traning
        print("----------------------Start Training-------------------------" )
        print((" - - Shape: Train_Data  ", np.shape(__lowercase )) )
        print((" - - Shape: Teach_Data  ", np.shape(__lowercase )) )
        lowercase__ = 0
        lowercase__ = []
        lowercase__ = 1_0000
        while rp < n_repeat and mse >= error_accuracy:
            lowercase__ = 0
            print(F'''-------------Learning Time {rp}--------------''' )
            for p in range(len(__lowercase ) ):
                # print('------------Learning Image: %d--------------'%p)
                lowercase__ = np.asmatrix(datas_train[p] )
                lowercase__ = np.asarray(datas_teach[p] )
                lowercase__ , lowercase__ = self.convolute(
                    __lowercase, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, )
                lowercase__ = self.pooling(__lowercase, self.size_poolinga )
                lowercase__ = np.shape(__lowercase )
                lowercase__ = self._expand(__lowercase )
                lowercase__ = data_bp_input
                lowercase__ = np.dot(__lowercase, self.vji.T ) - self.thre_bpa
                lowercase__ = self.sig(__lowercase )
                lowercase__ = np.dot(__lowercase, self.wkj.T ) - self.thre_bpa
                lowercase__ = self.sig(__lowercase )
                # --------------Model Leaning ------------------------
                # calculate error and gradient---------------
                lowercase__ = np.multiply(
                    (data_teach - bp_outa), np.multiply(__lowercase, (1 - bp_outa) ) )
                lowercase__ = np.multiply(
                    np.dot(__lowercase, self.wkj ), np.multiply(__lowercase, (1 - bp_outa) ) )
                lowercase__ = np.dot(__lowercase, self.vji )
                lowercase__ = pd_i_all / (self.size_poolinga * self.size_poolinga)
                lowercase__ = pd_conva_pooled.T.getA().tolist()
                lowercase__ = self._calculate_gradient_from_pool(
                    __lowercase, __lowercase, shape_featuremapa[0], shape_featuremapa[1], self.size_poolinga, )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conva[1] ):
                    lowercase__ = self._expand_mat(pd_conva_all[k_conv] )
                    lowercase__ = self.rate_weight * np.dot(__lowercase, __lowercase )
                    lowercase__ = self.w_conva[k_conv] + delta_w.reshape(
                        (self.conva[0], self.conva[0]) )
                    lowercase__ = (
                        self.thre_conva[k_conv]
                        - np.sum(pd_conva_all[k_conv] ) * self.rate_thre
                    )
                # all connected layer
                lowercase__ = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
                lowercase__ = self.vji + pd_j_all.T * bp_outa * self.rate_weight
                lowercase__ = self.thre_bpa - pd_k_all * self.rate_thre
                lowercase__ = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                lowercase__ = np.sum(abs(data_teach - bp_outa ) )
                error_count += errors
                # print('   ----Teach      ', data_teach)
                # print('   ----BP_output  ', bp_out3)
            lowercase__ = rp + 1
            lowercase__ = error_count / patterns
            all_mse.append(__lowercase )

        def draw_error():
            # Plot MSE per epoch against the target accuracy line.
            lowercase__ = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
            plt.plot(__lowercase, "+-" )
            plt.plot(__lowercase, "r--" )
            plt.xlabel("Learning Times" )
            plt.ylabel("All_mse" )
            plt.grid(__lowercase, alpha=0.5 )
            plt.show()

        print("------------------Training Complished---------------------" )
        print((" - - Training epoch: ", rp, F''' - - Mse: {mse:.6f}''') )
        if draw_e:
            draw_error()
        return mse

    def A__ ( self : List[str], __lowercase : Optional[int] ):
        # model predict
        lowercase__ = []
        print("-------------------Start Testing-------------------------" )
        print((" - - Shape: Test_Data  ", np.shape(__lowercase )) )
        for p in range(len(__lowercase ) ):
            lowercase__ = np.asmatrix(datas_test[p] )
            lowercase__ , lowercase__ = self.convolute(
                __lowercase, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, )
            lowercase__ = self.pooling(__lowercase, self.size_poolinga )
            lowercase__ = self._expand(__lowercase )
            lowercase__ = data_bp_input
            lowercase__ = bp_outa * self.vji.T - self.thre_bpa
            lowercase__ = self.sig(__lowercase )
            lowercase__ = bp_outa * self.wkj.T - self.thre_bpa
            lowercase__ = self.sig(__lowercase )
            produce_out.extend(bp_outa.getA().tolist() )
        lowercase__ = [list(map(self.do_round, __lowercase ) ) for each in produce_out]
        return np.asarray(__lowercase )

    def A__ ( self : int, __lowercase : Any ):
        # return the data of image after convoluting process so we can check it out
        lowercase__ = np.asmatrix(__lowercase )
        lowercase__ , lowercase__ = self.convolute(
            __lowercase, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, )
        lowercase__ = self.pooling(__lowercase, self.size_poolinga )
        return data_conveda, data_pooleda
if __name__ == "__main__":
    # Intentionally a no-op: the CNN class above is meant to be imported
    # and driven by external training/prediction code.
    pass
| 37
| 0
|
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class _snake_case ( unittest.TestCase):
    """Model tester building configs and dummy inputs for Flax RoBERTa-PreLayerNorm.

    NOTE(review): machine-mangled. The ``__init__`` declares every keyword
    parameter under the same name (a SyntaxError as written), and the
    ``lowercase__ = ...`` lines were originally ``self.<param> = <param>``
    attribute assignments — confirm against the upstream transformers tester.
    """

    def __init__( self : Tuple, __lowercase : Any, __lowercase : List[Any]=13, __lowercase : Union[str, Any]=7, __lowercase : Tuple=True, __lowercase : int=True, __lowercase : List[str]=True, __lowercase : Any=True, __lowercase : Optional[Any]=99, __lowercase : List[Any]=32, __lowercase : Tuple=5, __lowercase : Optional[int]=4, __lowercase : Dict=37, __lowercase : str="gelu", __lowercase : List[str]=0.1, __lowercase : List[str]=0.1, __lowercase : Tuple=512, __lowercase : Tuple=16, __lowercase : List[str]=2, __lowercase : int=0.02, __lowercase : List[Any]=4, ):
        lowercase__ = parent
        lowercase__ = batch_size
        lowercase__ = seq_length
        lowercase__ = is_training
        lowercase__ = use_attention_mask
        lowercase__ = use_token_type_ids
        lowercase__ = use_labels
        lowercase__ = vocab_size
        lowercase__ = hidden_size
        lowercase__ = num_hidden_layers
        lowercase__ = num_attention_heads
        lowercase__ = intermediate_size
        lowercase__ = hidden_act
        lowercase__ = hidden_dropout_prob
        lowercase__ = attention_probs_dropout_prob
        lowercase__ = max_position_embeddings
        lowercase__ = type_vocab_size
        lowercase__ = type_sequence_label_size
        lowercase__ = initializer_range
        lowercase__ = num_choices

    # Build a config plus random input_ids / token_type_ids / attention_mask.
    def A__ ( self : int ):
        lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        lowercase__ = None
        if self.use_attention_mask:
            lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
        lowercase__ = None
        if self.use_token_type_ids:
            lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        lowercase__ = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=__lowercase, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask

    # Same as above but packaged as the (config, inputs_dict) pair used by the mixin.
    def A__ ( self : str ):
        lowercase__ = self.prepare_config_and_inputs()
        lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs
        lowercase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    # Decoder variant: adds encoder hidden states and encoder attention mask.
    def A__ ( self : Optional[Any] ):
        lowercase__ = self.prepare_config_and_inputs()
        lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs
        lowercase__ = True
        lowercase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        lowercase__ = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class _snake_case ( lowercase__ , unittest.TestCase):
    """Common model tests for the Flax RoBERTa-PreLayerNorm family."""

    UpperCamelCase__ : int =True
    UpperCamelCase__ : Union[str, Any] =(
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def A__(self):
        """setUp: build the shared model tester.

        Fix: the original bound the tester to a throwaway local, so test methods
        relying on ``self.model_tester`` could never see it.
        """
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def A__(self):
        """Smoke-test from_pretrained for every model class in the suite."""
        for model_class_name in self.all_model_classes:
            # Fix: `from_pt` was passed the undefined name `__lowercase`; the
            # checkpoint is a PyTorch one, so from_pt=True.
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class _snake_case ( unittest.TestCase):
    """Integration tests checking exact logits/hidden-state slices.

    Fixes relative to the original: ``from_pt`` received an undefined name
    (restored to ``True``) and the dtype attributes were corrupted
    (``jnp.intaa``/``np.floataa`` -> ``jnp.int32``/``np.float32`` as in the
    upstream test file).
    """

    @slow
    def A__(self):
        """Masked-LM head: check output shape and a 3x3 logits slice."""
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 5_0265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def A__(self):
        """Base model: check a 3x3 slice of the last hidden state."""
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 708
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
# Enable INFO-level logging for the conversion script and grab a module logger.
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def __lowerCAmelCase(model_name):
    """Build a BitConfig (with ImageNet-1k labels) for a timm BiT model name.

    Fixes: the original dict comprehension called ``int`` on the function
    parameter instead of the label key, and every ``BitConfig`` keyword was
    passed the function parameter rather than the computed value.
    """
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def __lowerCAmelCase(name):
    """Translate a timm BiT state-dict key into its HF Transformers equivalent.

    Fix: the parameter was named ``SCREAMING_SNAKE_CASE_`` while the body read
    and rebound ``name``, and each rewrite was dropped into a throwaway local.
    """
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    # Anything not already routed to the embedder/classifier lives in the encoder.
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
def __lowerCAmelCase():
    """Download the standard COCO cats image used to sanity-check conversions.

    Fix: ``stream`` was passed the undefined name ``SCREAMING_SNAKE_CASE_``
    (the function takes no parameters); streaming the response is required to
    hand ``.raw`` to PIL, so it is ``True``.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    return Image.open(requests.get(url, stream=True).raw)
@torch.no_grad()
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ):
    # Convert a timm BiT checkpoint to HF format, verify it, and optionally save/push it.
    # NOTE(review): all three parameters share one name (a SyntaxError) and the body
    # reads `model_name` / `pytorch_dump_folder_path` / `push_to_hub` plus helpers
    # (`get_config`, `prepare_img`) that are not bound under those names in this file.
    # The intended signature is presumably
    # (model_name, pytorch_dump_folder_path, push_to_hub=False) — confirm upstream.
    lowercase__ = get_config(SCREAMING_SNAKE_CASE_ )
    # load original model from timm
    lowercase__ = create_model(SCREAMING_SNAKE_CASE_ , pretrained=SCREAMING_SNAKE_CASE_ )
    timm_model.eval()
    # load state_dict of original model
    lowercase__ = timm_model.state_dict()
    for key in state_dict.copy().keys():
        # Classifier head weights are stored squeezed on the HF side.
        lowercase__ = state_dict.pop(SCREAMING_SNAKE_CASE_ )
        lowercase__ = val.squeeze() if "head" in key else val
    # load HuggingFace model
    lowercase__ = BitForImageClassification(SCREAMING_SNAKE_CASE_ )
    model.eval()
    model.load_state_dict(SCREAMING_SNAKE_CASE_ )
    # create image processor
    # NOTE(review): the processor is built from the timm transform pipeline:
    # [0] resize, [1] center crop, [-1] normalize.
    lowercase__ = create_transform(**resolve_data_config({} , model=SCREAMING_SNAKE_CASE_ ) )
    lowercase__ = transform.transforms
    lowercase__ = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    lowercase__ = BitImageProcessor(
        do_resize=SCREAMING_SNAKE_CASE_ , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=SCREAMING_SNAKE_CASE_ , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=SCREAMING_SNAKE_CASE_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    lowercase__ = prepare_img()
    lowercase__ = transform(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 )
    lowercase__ = processor(SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).pixel_values
    # verify pixel values
    assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    # verify logits
    with torch.no_grad():
        lowercase__ = model(SCREAMING_SNAKE_CASE_ )
    lowercase__ = outputs.logits
    print("Logits:" , logits[0, :3] )
    print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] )
    lowercase__ = timm_model(SCREAMING_SNAKE_CASE_ )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(SCREAMING_SNAKE_CASE_ , outputs.logits , atol=1e-3 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
        print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
        model.save_pretrained(SCREAMING_SNAKE_CASE_ )
        processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
    if push_to_hub:
        print(f'''Pushing model {model_name} and processor to the hub''' )
        model.push_to_hub(f'''ybelkada/{model_name}''' )
        processor.push_to_hub(f'''ybelkada/{model_name}''' )
if __name__ == "__main__":
    # CLI entry point for the BiT conversion script.
    # Fixes: the parser was bound to a throwaway name while `parser.add_argument`
    # was then called on an undefined `parser`, and the final call targeted the
    # non-existent `convert_bit_checkpoint` (the conversion function above is
    # named `__lowerCAmelCase` in this file).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )
    args = parser.parse_args()
    __lowerCAmelCase(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 37
| 0
|
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def __lowerCAmelCase():
    """Parse TensorFlowBenchmarkArguments from the CLI and run the benchmark.

    Restored per the upstream ``run_benchmark_tf.py``: the original passed the
    undefined name ``SCREAMING_SNAKE_CASE_`` to ``HfArgumentParser`` and
    ``TensorFlowBenchmark``, and used it again for ``str(e)``/``append(arg)``
    inside the except block.

    Raises:
        ValueError: with a rewritten message when deprecated ``--no_*`` flags
        (or genuinely unknown flags) are passed.
    """
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        # The trailing token of argparse's error message is the list of offending
        # flags; `eval` of that text matches the original script's behavior.
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
    # The benchmark entry point above is (obfuscated-)named `__lowerCAmelCase`;
    # the original called an undefined `main()`.
    __lowerCAmelCase()
| 709
|
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class _snake_case ( lowercase__):
    # A SentencePiece-Unigram tokenizer built on the `tokenizers` library
    # (NMT + NFKC + whitespace-collapse + lowercase normalization, Metaspace
    # pre-tokenization with digit splitting, and an "$A </s>" post-processor).
    # NOTE(review): the parameter list repeats `__lowercase` (a SyntaxError) and
    # almost every attribute assignment was collapsed to a throwaway local
    # `lowercase__` while later lines read `self.special_tokens`,
    # `self.special_tokens_list`, `self._tokenizer`, `replacement`,
    # `add_prefix_space`, etc.  The intended signature is presumably
    # (replacement="▁", add_prefix_space=True, unk_token="<unk>",
    #  eos_token="</s>", pad_token="<pad>") — confirm against the upstream
    # t5_tokenizer_model.py before fixing.
    def __init__( self : Optional[Any], __lowercase : str = "▁", __lowercase : bool = True, __lowercase : Union[str, AddedToken] = "<unk>", __lowercase : Union[str, AddedToken] = "</s>", __lowercase : Union[str, AddedToken] = "<pad>", ):
        # Fixed ids for the special tokens: pad=0, eos=1, unk=2.
        lowercase__ = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }
        # Flat list of special tokens, indexed by their ids.
        lowercase__ = [None] * len(self.special_tokens )
        for token_dict in self.special_tokens.values():
            lowercase__ = token_dict["token"]
        lowercase__ = Tokenizer(Unigram() )
        lowercase__ = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}" ), " " ),
                normalizers.Lowercase(),
            ] )
        lowercase__ = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=__lowercase, add_prefix_space=__lowercase ),
                pre_tokenizers.Digits(individual_digits=__lowercase ),
                pre_tokenizers.Punctuation(),
            ] )
        lowercase__ = decoders.Metaspace(replacement=__lowercase, add_prefix_space=__lowercase )
        # Append </s> to every single sequence.
        lowercase__ = TemplateProcessing(
            single=F'''$A {self.special_tokens["eos"]["token"]}''', special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])], )
        lowercase__ = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }
        super().__init__(__lowercase, __lowercase )
    def A__ ( self : Union[str, Any], __lowercase : Union[str, List[str]], __lowercase : int = 8000, __lowercase : bool = True, ):
        # Train the Unigram model from one or more text files, then register unk_id.
        lowercase__ = trainers.UnigramTrainer(
            vocab_size=__lowercase, special_tokens=self.special_tokens_list, show_progress=__lowercase, )
        if isinstance(__lowercase, __lowercase ):
            # A single path is promoted to a one-element list.
            lowercase__ = [files]
        self._tokenizer.train(__lowercase, trainer=__lowercase )
        self.add_unk_id()
    def A__ ( self : List[Any], __lowercase : Union[Iterator[str], Iterator[Iterator[str]]], __lowercase : int = 8000, __lowercase : bool = True, ):
        # Train the Unigram model from an in-memory iterator, then register unk_id.
        lowercase__ = trainers.UnigramTrainer(
            vocab_size=__lowercase, special_tokens=self.special_tokens_list, show_progress=__lowercase, )
        self._tokenizer.train_from_iterator(__lowercase, trainer=__lowercase )
        self.add_unk_id()
    def A__ ( self : str ):
        # Patch the serialized tokenizer JSON so the Unigram model knows its unk id,
        # then rebuild the tokenizer from the patched JSON.
        lowercase__ = json.loads(self._tokenizer.to_str() )
        lowercase__ = self.special_tokens["unk"]["id"]
        lowercase__ = Tokenizer.from_str(json.dumps(__lowercase ) )
| 37
| 0
|
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
# Make all torch/cuda ops deterministic so the slice-comparison tests are reproducible.
enable_full_determinism()
class _snake_case ( lowercase__ , lowercase__ , unittest.TestCase):
    # Unit tests for the AutoencoderKL (VAE) model.
    # NOTE(review): both mixin bases are spelled `lowercase__` — presumably
    # ModelTesterMixin and UNetTesterMixin imported above; confirm upstream.
    # NOTE(review): throughout this class locals are collapsed to `lowercase__`
    # while later lines read the original names (`model`, `out`, `labels`, ...);
    # as written many methods would raise NameError at runtime.
    UpperCamelCase__ : Any =AutoencoderKL
    UpperCamelCase__ : Any ="""sample"""
    UpperCamelCase__ : str =1E-2
    @property
    def A__ ( self : int ):
        # Dummy input: a random (batch, channels, 32, 32) image tensor.
        lowercase__ = 4
        lowercase__ = 3
        lowercase__ = (32, 32)
        lowercase__ = floats_tensor((batch_size, num_channels) + sizes ).to(__lowercase )
        return {"sample": image}
    @property
    def A__ ( self : int ):
        # Expected input shape (channels, height, width).
        return (3, 32, 32)
    @property
    def A__ ( self : Optional[int] ):
        # Expected output shape (channels, height, width).
        return (3, 32, 32)
    def A__ ( self : int ):
        # Init kwargs + dummy inputs used by the common model tests.
        lowercase__ = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        lowercase__ = self.dummy_input
        return init_dict, inputs_dict
    def A__ ( self : Union[str, Any] ):
        # Intentionally skipped for this model.
        pass
    def A__ ( self : Dict ):
        # Intentionally skipped for this model.
        pass
    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS" )
    def A__ ( self : List[Any] ):
        # Check that gradient checkpointing yields the same loss and parameter
        # gradients as the non-checkpointed model.
        # enable deterministic behavior for gradient checkpointing
        lowercase__ , lowercase__ = self.prepare_init_args_and_inputs_for_common()
        lowercase__ = self.model_class(**__lowercase )
        model.to(__lowercase )
        assert not model.is_gradient_checkpointing and model.training
        lowercase__ = model(**__lowercase ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        lowercase__ = torch.randn_like(__lowercase )
        lowercase__ = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        lowercase__ = self.model_class(**__lowercase )
        # clone model
        model_a.load_state_dict(model.state_dict() )
        model_a.to(__lowercase )
        model_a.enable_gradient_checkpointing()
        assert model_a.is_gradient_checkpointing and model_a.training
        lowercase__ = model_a(**__lowercase ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_a.zero_grad()
        lowercase__ = (out_a - labels).mean()
        loss_a.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_a).abs() < 1e-5 )
        lowercase__ = dict(model.named_parameters() )
        lowercase__ = dict(model_a.named_parameters() )
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_a[name].grad.data, atol=5e-5 ) )
    def A__ ( self : Union[str, Any] ):
        # from_pretrained should report no missing keys and produce an output.
        lowercase__ , lowercase__ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=__lowercase )
        self.assertIsNotNone(__lowercase )
        self.assertEqual(len(loading_info["missing_keys"] ), 0 )
        model.to(__lowercase )
        lowercase__ = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"
    def A__ ( self : Any ):
        # Deterministic forward pass compared against hard-coded output slices.
        lowercase__ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
        lowercase__ = model.to(__lowercase )
        model.eval()
        # MPS does not support device-bound generators, so seed globally there.
        if torch_device == "mps":
            lowercase__ = torch.manual_seed(0 )
        else:
            lowercase__ = torch.Generator(device=__lowercase ).manual_seed(0 )
        lowercase__ = torch.randn(
            1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0 ), )
        lowercase__ = image.to(__lowercase )
        with torch.no_grad():
            lowercase__ = model(__lowercase, sample_posterior=__lowercase, generator=__lowercase ).sample
        lowercase__ = output[0, -1, -3:, -3:].flatten().cpu()
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            lowercase__ = torch.tensor(
                [
                    -4.0_0_7_8e-0_1,
                    -3.8_3_2_3e-0_4,
                    -1.2_6_8_1e-0_1,
                    -1.1_4_6_2e-0_1,
                    2.0_0_9_5e-0_1,
                    1.0_8_9_3e-0_1,
                    -8.8_2_4_7e-0_2,
                    -3.0_3_6_1e-0_1,
                    -9.8_6_4_4e-0_3,
                ] )
        elif torch_device == "cpu":
            lowercase__ = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
        else:
            lowercase__ = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
        self.assertTrue(torch_all_close(__lowercase, __lowercase, rtol=1e-2 ) )
@slow
class _snake_case ( unittest.TestCase):
    # Slow integration tests for AutoencoderKL against the Stable Diffusion v1-4 VAE,
    # comparing sampled/decoded/encoded slices to hard-coded expected values.
    # NOTE(review): several signatures repeat `__lowercase` (a SyntaxError), locals
    # are collapsed to `lowercase__`, and bodies read names (`seed`, `shape`,
    # `fpaa`, `model`, `image`, ...) the mangled signatures never bind.  Also
    # `torch.floataa if fpaa else torch.floataa` selects the same corrupted dtype
    # in both branches — presumably float16 vs float32 originally; confirm upstream.
    def A__ ( self : Dict, __lowercase : int, __lowercase : str ):
        # Build the cached .npy filename for a given seed and tensor shape.
        return F'''gaussian_noise_s={seed}_shape={"_".join([str(__lowercase ) for s in shape] )}.npy'''
    def A__ ( self : Optional[Any] ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def A__ ( self : Optional[Any], __lowercase : Optional[Any]=0, __lowercase : List[str]=(4, 3, 512, 512), __lowercase : List[Any]=False ):
        # Load a fixed Gaussian-noise image from the HF hub for reproducible inputs.
        lowercase__ = torch.floataa if fpaa else torch.floataa
        lowercase__ = torch.from_numpy(load_hf_numpy(self.get_file_format(__lowercase, __lowercase ) ) ).to(__lowercase ).to(__lowercase )
        return image
    def A__ ( self : Optional[int], __lowercase : Any="CompVis/stable-diffusion-v1-4", __lowercase : List[Any]=False ):
        # Load the SD VAE submodule in eval mode (optionally the fp16 revision).
        lowercase__ = "fp16" if fpaa else None
        lowercase__ = torch.floataa if fpaa else torch.floataa
        lowercase__ = AutoencoderKL.from_pretrained(
            __lowercase, subfolder="vae", torch_dtype=__lowercase, revision=__lowercase, )
        model.to(__lowercase ).eval()
        return model
    def A__ ( self : str, __lowercase : Optional[int]=0 ):
        # Seeded RNG; MPS lacks device-bound generators, so seed globally there.
        if torch_device == "mps":
            return torch.manual_seed(__lowercase )
        return torch.Generator(device=__lowercase ).manual_seed(__lowercase )
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ] )
    def A__ ( self : int, __lowercase : str, __lowercase : Union[str, Any], __lowercase : List[Any] ):
        # Full sample() pass with posterior sampling; compare an output slice.
        lowercase__ = self.get_sd_vae_model()
        lowercase__ = self.get_sd_image(__lowercase )
        lowercase__ = self.get_generator(__lowercase )
        with torch.no_grad():
            lowercase__ = model(__lowercase, generator=__lowercase, sample_posterior=__lowercase ).sample
        assert sample.shape == image.shape
        lowercase__ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        lowercase__ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
        assert torch_all_close(__lowercase, __lowercase, atol=3e-3 )
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ] )
    @require_torch_gpu
    def A__ ( self : int, __lowercase : Optional[int], __lowercase : Tuple ):
        # Same sample() pass in half precision (GPU only).
        lowercase__ = self.get_sd_vae_model(fpaa=__lowercase )
        lowercase__ = self.get_sd_image(__lowercase, fpaa=__lowercase )
        lowercase__ = self.get_generator(__lowercase )
        with torch.no_grad():
            lowercase__ = model(__lowercase, generator=__lowercase, sample_posterior=__lowercase ).sample
        assert sample.shape == image.shape
        lowercase__ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        lowercase__ = torch.tensor(__lowercase )
        assert torch_all_close(__lowercase, __lowercase, atol=1e-2 )
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ] )
    def A__ ( self : Any, __lowercase : Optional[Any], __lowercase : Tuple, __lowercase : List[Any] ):
        # Deterministic (mode) forward pass without posterior sampling.
        lowercase__ = self.get_sd_vae_model()
        lowercase__ = self.get_sd_image(__lowercase )
        with torch.no_grad():
            lowercase__ = model(__lowercase ).sample
        assert sample.shape == image.shape
        lowercase__ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        lowercase__ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
        assert torch_all_close(__lowercase, __lowercase, atol=3e-3 )
    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ] )
    @require_torch_gpu
    def A__ ( self : Optional[Any], __lowercase : Union[str, Any], __lowercase : Tuple ):
        # decode(): latents (3, 4, 64, 64) -> images (3, 3, 512, 512); compare a slice.
        lowercase__ = self.get_sd_vae_model()
        lowercase__ = self.get_sd_image(__lowercase, shape=(3, 4, 64, 64) )
        with torch.no_grad():
            lowercase__ = model.decode(__lowercase ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        lowercase__ = sample[-1, -2:, :2, -2:].flatten().cpu()
        lowercase__ = torch.tensor(__lowercase )
        assert torch_all_close(__lowercase, __lowercase, atol=1e-3 )
    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ] )
    @require_torch_gpu
    def A__ ( self : Optional[int], __lowercase : List[Any], __lowercase : Any ):
        # decode() in half precision (GPU only).
        lowercase__ = self.get_sd_vae_model(fpaa=__lowercase )
        lowercase__ = self.get_sd_image(__lowercase, shape=(3, 4, 64, 64), fpaa=__lowercase )
        with torch.no_grad():
            lowercase__ = model.decode(__lowercase ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        lowercase__ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        lowercase__ = torch.tensor(__lowercase )
        assert torch_all_close(__lowercase, __lowercase, atol=5e-3 )
    @parameterized.expand([(13,), (16,), (27,)] )
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0." )
    def A__ ( self : Tuple, __lowercase : Union[str, Any] ):
        # xformers attention must match the default attention (fp16 path).
        lowercase__ = self.get_sd_vae_model(fpaa=__lowercase )
        lowercase__ = self.get_sd_image(__lowercase, shape=(3, 4, 64, 64), fpaa=__lowercase )
        with torch.no_grad():
            lowercase__ = model.decode(__lowercase ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            lowercase__ = model.decode(__lowercase ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(__lowercase, __lowercase, atol=1e-1 )
    @parameterized.expand([(13,), (16,), (37,)] )
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0." )
    def A__ ( self : int, __lowercase : Any ):
        # xformers attention must match the default attention (fp32 path).
        lowercase__ = self.get_sd_vae_model()
        lowercase__ = self.get_sd_image(__lowercase, shape=(3, 4, 64, 64) )
        with torch.no_grad():
            lowercase__ = model.decode(__lowercase ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            lowercase__ = model.decode(__lowercase ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(__lowercase, __lowercase, atol=1e-2 )
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ] )
    def A__ ( self : Optional[Any], __lowercase : Union[str, Any], __lowercase : Optional[Any] ):
        # encode(): image -> latent distribution; sample and compare a slice.
        lowercase__ = self.get_sd_vae_model()
        lowercase__ = self.get_sd_image(__lowercase )
        lowercase__ = self.get_generator(__lowercase )
        with torch.no_grad():
            lowercase__ = model.encode(__lowercase ).latent_dist
        lowercase__ = dist.sample(generator=__lowercase )
        # Latents are 4-channel and spatially downscaled by 8.
        assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        lowercase__ = sample[0, -1, -3:, -3:].flatten().cpu()
        lowercase__ = torch.tensor(__lowercase )
        lowercase__ = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(__lowercase, __lowercase, atol=__lowercase )
| 710
|
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def __lowerCAmelCase(model_card_dir, src_lang, tgt_lang):
    """Generate and write the README.md model card for one facebook/wmt19-* FSMT model.

    Fixes: the original signature repeated one parameter name (a SyntaxError)
    while the body read ``src_lang``/``tgt_lang``, and the sample texts/BLEU
    scores were bound to throwaway locals.

    Args:
        model_card_dir: directory the README.md is written into (created if missing).
        src_lang: source language code (``en``/``ru``/``de``).
        tgt_lang: target language code (``en``/``ru``/``de``).
    """
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f'''{src_lang}-{tgt_lang}'''

    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f'''Generating {path}''')
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

# Fixes: the original unpacked the split into a single repeated name (discarding
# src/tgt), read the undefined `model_cards_dir`, and called the non-existent
# `write_model_card` — the card writer above is named `__lowerCAmelCase` here.
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    # model names look like "wmt19-<src>-<tgt>"
    prefix, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    __lowerCAmelCase(model_card_dir, src_lang, tgt_lang)
| 37
| 0
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __SCREAMING_SNAKE_CASE(model_name):
    """Build a FocalNetConfig from a timm-style FocalNet model name.

    Fixes: every intermediate (depths, focal levels/windows, embed dim, labels)
    was bound to one throwaway local, and all ``FocalNetConfig`` keywords were
    passed the function parameter; values are restored per the upstream
    FocalNet conversion script.
    """
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    # conv embedding / post-layernorm / layerscale are only used by the big variants
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def __SCREAMING_SNAKE_CASE(name):
    """Translate a timm FocalNet state-dict key into its HF Transformers equivalent.

    Fix: the parameter was named ``SCREAMING_SNAKE_CASE_`` while the body read
    ``name``, and every rewrite was dropped into a throwaway local instead of
    rebinding ``name``.
    """
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ):
    # NOTE(review): this function cannot run as written — the three parameters
    # (by all appearances model_name, pytorch_dump_folder_path, push_to_hub)
    # share one name, which is a SyntaxError, and every local is assigned to
    # the throwaway ``lowercase__`` while later lines read the intended names
    # (``state_dict``, ``model``, ``processor`` ...). The comments below
    # describe the evident intent only; confirm against the original script.
    # fmt: off
    # Model identifier -> URL of the original (non-HF) pretrained checkpoint.
    lowercase__ = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on
    lowercase__ = model_name_to_url[model_name]
    print("Checkpoint URL: " , SCREAMING_SNAKE_CASE_ )
    # Download the original state dict onto CPU memory.
    lowercase__ = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location="cpu" )["model"]
    # rename keys
    # Each key is popped and reinserted under its renamed (HF-format) name.
    for key in state_dict.copy().keys():
        lowercase__ = state_dict.pop(SCREAMING_SNAKE_CASE_ )
        lowercase__ = val
    # Build the HF config and classification model, then load the renamed weights.
    lowercase__ = get_focalnet_config(SCREAMING_SNAKE_CASE_ )
    lowercase__ = FocalNetForImageClassification(SCREAMING_SNAKE_CASE_ )
    model.eval()
    # load state dict
    model.load_state_dict(SCREAMING_SNAKE_CASE_ )
    # verify conversion
    # Run the standard COCO cats test image through both the HF processor and
    # a reference torchvision pipeline and check they agree.
    lowercase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
    lowercase__ = BitImageProcessor(
        do_resize=SCREAMING_SNAKE_CASE_ , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=SCREAMING_SNAKE_CASE_ , crop_size=224 , do_normalize=SCREAMING_SNAKE_CASE_ , image_mean=SCREAMING_SNAKE_CASE_ , image_std=SCREAMING_SNAKE_CASE_ , )
    lowercase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
    lowercase__ = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="pt" )
    # Reference preprocessing (standard ImageNet eval transform).
    lowercase__ = transforms.Compose(
        [
            transforms.Resize(256 ),
            transforms.CenterCrop(224 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    lowercase__ = image_transforms(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 )
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , SCREAMING_SNAKE_CASE_ , atol=1e-4 )
    lowercase__ = model(**SCREAMING_SNAKE_CASE_ )
    lowercase__ = outputs.logits.argmax(-1 ).item()
    print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
    print("First values of logits:" , outputs.logits[0, :3] )
    # Hard-coded reference logits for each supported checkpoint.
    if model_name == "focalnet-tiny":
        lowercase__ = torch.tensor([0.2166, -0.4368, 0.2191] )
    elif model_name == "focalnet-tiny-lrf":
        lowercase__ = torch.tensor([1.1669, 0.0125, -0.1695] )
    elif model_name == "focalnet-small":
        lowercase__ = torch.tensor([0.4917, -0.0430, 0.1341] )
    elif model_name == "focalnet-small-lrf":
        lowercase__ = torch.tensor([-0.2588, -0.5342, -0.2331] )
    elif model_name == "focalnet-base":
        lowercase__ = torch.tensor([-0.1655, -0.4090, -0.1730] )
    elif model_name == "focalnet-base-lrf":
        lowercase__ = torch.tensor([0.5306, -0.0483, -0.3928] )
    assert torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 )
    print("Looks ok!" )
    # Optionally persist and/or push the converted model + processor.
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(SCREAMING_SNAKE_CASE_ )
        processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
    if push_to_hub:
        print(f'''Pushing model and processor of {model_name} to the hub...''' )
        model.push_to_hub(f'''{model_name}''' )
        processor.push_to_hub(f'''{model_name}''' )
if __name__ == "__main__":
    # CLI entry point for the FocalNet checkpoint conversion.
    # NOTE(review): the parser and the parsed args are assigned to the
    # throwaway ``lowercase_`` but read back as ``parser``/``args``, and
    # ``convert_focalnet_checkpoint`` is not defined under that name in this
    # file — this block cannot run as written.
    lowercase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""focalnet-tiny""",
        type=str,
        help="""Name of the FocalNet model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""",
        action="""store_true""",
        help="""Whether to push the model and processor to the hub.""",
    )
    lowercase_ = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 711
|
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _snake_case ( lowercase__ , unittest.TestCase):
    """Unit tests for ``TransfoXLTokenizer``.

    NOTE(review): the first base class ``lowercase__`` is not defined in this
    fragment (presumably ``TokenizerTesterMixin``), and several methods assign
    to the throwaway ``lowercase__`` while reading the intended local names
    (``vocab_tokens``, ``tokenizer``, ``original_len``) — the class cannot run
    as written; the comments describe intent only.
    """
    # Test harness configuration: tokenizer class under test and feature flags.
    UpperCamelCase__ : Dict =TransfoXLTokenizer
    UpperCamelCase__ : List[Any] =False
    UpperCamelCase__ : List[Any] =False
    def A__ ( self : Union[str, Any] ):
        # setUp: write a tiny vocabulary file into the test's temp dir.
        super().setUp()
        lowercase__ = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file, "w", encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def A__ ( self : Union[str, Any], **__lowercase : Any ):
        # Factory: load a tokenizer from the temp vocab, forcing lower_case.
        lowercase__ = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **__lowercase )
    def A__ ( self : Tuple, __lowercase : Optional[int] ):
        # Provide a raw/expected text pair for the common tokenizer tests.
        lowercase__ = "<unk> UNwanted , running"
        lowercase__ = "<unk> unwanted, running"
        return input_text, output_text
    def A__ ( self : str ):
        # Lower-cased tokenization maps to the expected tokens and ids.
        lowercase__ = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=__lowercase )
        lowercase__ = tokenizer.tokenize("<unk> UNwanted , running" )
        self.assertListEqual(__lowercase, ["<unk>", "unwanted", ",", "running"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ), [0, 4, 8, 7] )
    def A__ ( self : Tuple ):
        # Whitespace is stripped and tokens are lower-cased.
        lowercase__ = TransfoXLTokenizer(lower_case=__lowercase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  " ), ["hello", "!", "how", "are", "you", "?"] )
    def A__ ( self : Tuple ):
        # Without lower-casing the original capitalization is preserved.
        lowercase__ = TransfoXLTokenizer(lower_case=__lowercase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  " ), ["HeLLo", "!", "how", "Are", "yoU", "?"] )
    def A__ ( self : str ):
        # Moses-style punctuation splitting with @-@ / @,@ / @.@ escapes must
        # round-trip through tokenize + convert_tokens_to_string.
        lowercase__ = TransfoXLTokenizer(lower_case=__lowercase )
        lowercase__ = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        lowercase__ = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]
        self.assertListEqual(tokenizer.tokenize(__lowercase ), __lowercase )
        self.assertEqual(tokenizer.convert_tokens_to_string(__lowercase ), __lowercase )
    def A__ ( self : List[str] ):
        # move_added_token must relocate (not duplicate) an added token.
        lowercase__ = self.get_tokenizer()
        lowercase__ = len(__lowercase )
        tokenizer.add_tokens(["new1", "new2"] )
        tokenizer.move_added_token("new1", 1 )
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(__lowercase ), original_len + 2 )
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1" ), [1] )
        self.assertEqual(tokenizer.decode([1] ), "new1" )
| 37
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for this configuration file.
lowercase_ = logging.get_logger(__name__)
# Canonical pretrained-config archive map for MobileNetV1 checkpoints.
lowercase_ = {
    """google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
    """google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class _snake_case ( lowercase__):
    """Configuration container for MobileNetV1 models.

    NOTE(review): the base name ``lowercase__`` is not defined at module level
    in this file — presumably ``PretrainedConfig`` (imported above); confirm.
    The original constructor declared every parameter under the single mangled
    name ``__lowercase`` (a SyntaxError) and assigned each value to a
    throwaway local; the parameter names below are reconstructed from the
    attribute reads in the body and match positional order/defaults.
    """

    # Model-type identifier used by the auto classes.
    UpperCamelCase__ : Dict ="""mobilenet_v1"""

    def __init__( self, num_channels=3, image_size=224, depth_multiplier=1.0, min_depth=8, hidden_act="relu6", tf_padding=True, classifier_dropout_prob=0.999, initializer_range=0.02, layer_norm_eps=0.001, **kwargs, ):
        """Store the MobileNetV1 hyper-parameters.

        Raises:
            ValueError: if ``depth_multiplier`` is not strictly positive.
        """
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero." )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class _snake_case ( lowercase__):
    """ONNX export description for MobileNetV1: one dynamic-batch image input,
    task-dependent outputs, and the numeric tolerance used for validation."""

    # Minimum ONNX opset tooling version required for export.
    UpperCamelCase__ : Dict =version.parse("""1.11""")

    @property
    def A__ ( self : List[Any] ):
        # Single model input: pixel values, dynamic along the batch axis only.
        dynamic_batch = {0: "batch"}
        return OrderedDict([("pixel_values", dynamic_batch)] )

    @property
    def A__ ( self : Optional[Any] ):
        # Classification heads expose logits; the bare model exposes hidden
        # states plus the pooled output instead.
        if self.task != "image-classification":
            return OrderedDict(
                [("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
        return OrderedDict([("logits", {0: "batch"})] )

    @property
    def A__ ( self : Optional[Any] ):
        # Absolute tolerance when comparing ONNX outputs against PyTorch.
        return 1e-4
| 712
|
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def __lowerCAmelCase ( ):
    """CLI entry point that parses benchmark arguments and runs the
    TensorFlow benchmark.

    NOTE(review): locals are mangled — every assignment targets the throwaway
    ``lowercase__`` while later lines read the intended names (``parser``,
    ``benchmark``, ``arg_error_msg``, ``begin_error_msg``, ``depreciated_args``,
    ``wrong_args``, ``full_error_msg``) — so this cannot run as written; the
    comments describe intent only.
    """
    lowercase__ = HfArgumentParser(SCREAMING_SNAKE_CASE_ )
    lowercase__ = parser.parse_args_into_dataclasses()[0]
    lowercase__ = TensorFlowBenchmark(args=SCREAMING_SNAKE_CASE_ )
    try:
        lowercase__ = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        # Translate removed ``--no_*`` flags into an actionable error message.
        lowercase__ = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        lowercase__ = " ".join(str(SCREAMING_SNAKE_CASE_ ).split(" " )[:-1] )
        lowercase__ = ""
        # NOTE(review): ``eval`` on text derived from an exception message —
        # presumably it parses the trailing list literal of bad flags; this is
        # fragile and unsafe if the message ever contains arbitrary input.
        lowercase__ = eval(str(SCREAMING_SNAKE_CASE_ ).split(" " )[-1] )
        lowercase__ = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(SCREAMING_SNAKE_CASE_ )
        if len(SCREAMING_SNAKE_CASE_ ) > 0:
            lowercase__ = full_error_msg + begin_error_msg + str(SCREAMING_SNAKE_CASE_ )
            raise ValueError(SCREAMING_SNAKE_CASE_ )
    benchmark.run()
if __name__ == "__main__":
    # NOTE(review): ``main`` is not defined in this file — the entry point
    # above was renamed; confirm the intended callable.
    main()
| 37
| 0
|
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
# Maps the requested number of image embeddings to the (rows, cols) grid of
# the adaptive average pool, so that rows * cols == num_image_embeds.
lowercase_ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class _snake_case ( nn.Module):
    """Image encoder: a ResNet trunk (classifier head removed) followed by an
    adaptive average pool that yields N spatial embeddings per image.

    NOTE(review): locals in ``__init__`` are mangled — every assignment goes
    to ``lowercase__`` while the next line reads the intended name (``model``,
    ``modules``), ``torchvision.models.resnetaaa`` does not exist (presumably
    ``resnet152``), and ``args`` is read but never received — this cannot run
    as written; comments describe intent only.
    """
    def __init__( self : List[Any], __lowercase : List[Any] ):
        super().__init__()
        lowercase__ = torchvision.models.resnetaaa(pretrained=__lowercase )
        # Drop the final avgpool + fc layers, keeping only the conv trunk.
        lowercase__ = list(model.children() )[:-2]
        lowercase__ = nn.Sequential(*__lowercase )
        # Pool to a (rows, cols) grid so rows * cols == args.num_image_embeds.
        lowercase__ = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
    def A__ ( self : Union[str, Any], __lowercase : Dict ):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        lowercase__ = self.pool(self.model(__lowercase ) )
        lowercase__ = torch.flatten(__lowercase, start_dim=2 )
        lowercase__ = out.transpose(1, 2 ).contiguous()
        return out  # BxNx2048
class _snake_case ( lowercase__):
    """JSON-lines dataset pairing a tokenized sentence with an image and a
    multi-hot genre label vector (MM-IMDB style).

    NOTE(review): the base name ``lowercase__`` is undefined in this fragment
    (presumably ``Dataset``) and the method locals are mangled (assignments to
    ``lowercase__`` with reads of the intended names), so the class cannot run
    as written; comments describe intent only.
    """
    def __init__( self : Any, __lowercase : List[Any], __lowercase : Optional[Any], __lowercase : Tuple, __lowercase : Optional[Any], __lowercase : Optional[int] ):
        # Load one JSON object per line; images are resolved relative to the
        # data file's directory.
        lowercase__ = [json.loads(__lowercase ) for l in open(__lowercase )]
        lowercase__ = os.path.dirname(__lowercase )
        lowercase__ = tokenizer
        lowercase__ = labels
        lowercase__ = len(__lowercase )
        lowercase__ = max_seq_length
        lowercase__ = transforms
    def __len__( self : str ):
        return len(self.data )
    def __getitem__( self : List[str], __lowercase : Tuple ):
        # Tokenize the text, split off the special start/end tokens, truncate,
        # build a multi-hot label vector and load + transform the image.
        lowercase__ = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=__lowercase ) )
        lowercase__ , lowercase__ , lowercase__ = sentence[0], sentence[1:-1], sentence[-1]
        lowercase__ = sentence[: self.max_seq_length]
        lowercase__ = torch.zeros(self.n_classes )
        lowercase__ = 1
        lowercase__ = Image.open(os.path.join(self.data_dir, self.data[index]["img"] ) ).convert("RGB" )
        lowercase__ = self.transforms(__lowercase )
        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }
    def A__ ( self : Any ):
        # Frequency of each label across the whole dataset (for class weights).
        lowercase__ = Counter()
        for row in self.data:
            label_freqs.update(row["label"] )
        return label_freqs
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    """Collate a list of dataset rows into padded batch tensors.

    NOTE(review): locals are mangled — assignments go to ``lowercase__`` while
    later lines read the intended names (``lens``, ``bsz``, ``max_seq_len``,
    the per-field tensors) — so this cannot run as written; comments describe
    intent only. Returns (text, mask, image, img_start, img_end, target).
    """
    # Per-row sentence lengths drive the padded text/mask tensor shapes.
    lowercase__ = [len(row["sentence"] ) for row in batch]
    lowercase__ , lowercase__ = len(SCREAMING_SNAKE_CASE_ ), max(SCREAMING_SNAKE_CASE_ )
    lowercase__ = torch.zeros(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , dtype=torch.long )
    lowercase__ = torch.zeros(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , dtype=torch.long )
    # Copy each sentence into its row and mark the valid positions.
    for i_batch, (input_row, length) in enumerate(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ):
        lowercase__ = input_row["sentence"]
        lowercase__ = 1
    lowercase__ = torch.stack([row["image"] for row in batch] )
    lowercase__ = torch.stack([row["label"] for row in batch] )
    lowercase__ = torch.stack([row["image_start_token"] for row in batch] )
    lowercase__ = torch.stack([row["image_end_token"] for row in batch] )
    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def __lowerCAmelCase ( ):
    """Return the fixed list of MM-IMDB genre labels, in canonical order."""
    # Every genre name is a single whitespace-free token, so the list can be
    # spelled as one string and split.
    return (
        "Crime Drama Thriller Action Comedy Romance Documentary Short "
        "Mystery History Family Adventure Fantasy Sci-Fi Western Horror "
        "Sport War Music Musical Animation Biography Film-Noir"
    ).split()
def __lowerCAmelCase ( ):
    """Build the torchvision preprocessing pipeline used for MM-IMDB images:
    resize, center-crop to 224x224, convert to tensor, then normalize with the
    dataset's channel statistics."""
    normalization = transforms.Normalize(
        mean=[0.4677_7044, 0.4453_1429, 0.4066_1017] , std=[0.1222_1994, 0.1214_5835, 0.1438_0469] , )
    steps = [
        transforms.Resize(256 ),
        transforms.CenterCrop(224 ),
        transforms.ToTensor(),
        normalization,
    ]
    return transforms.Compose(steps )
| 713
|
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
lowercase_ = """<<<<<<< This should probably be modified because it mentions: """
lowercase_ = """=======
>>>>>>>
"""
lowercase_ = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
lowercase_ = [
# (pattern, replacement)
# Order is important here for some replacements
(r"""tfds\.core""", r"""datasets"""),
(r"""tf\.io\.gfile\.GFile""", r"""open"""),
(r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""),
(r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""),
(r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""),
(r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""),
(r"""tfds\.features\.FeaturesDict\(""", r"""dict("""),
(r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(r"""tfds\.""", r"""datasets."""),
(r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""),
(r"""self\.builder_config""", r"""self.config"""),
]
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    """Factory used by the ``datasets-cli`` framework: build the convert
    command from parsed CLI arguments.

    NOTE(review): ``ConvertCommand`` is presumably the command class defined
    later in this file (there named ``_snake_case``); confirm the binding.
    """
    # The original read an undefined name ``args``; bind it to the parameter.
    args = SCREAMING_SNAKE_CASE_
    return ConvertCommand(args.tfds_path , args.datasets_directory )
class _snake_case ( lowercase__):
    """``datasets-cli convert`` command: rewrite a TensorFlow Datasets script
    into a HuggingFace Datasets script by applying the regex table above.

    NOTE(review): the base name ``lowercase__`` is undefined in this fragment
    (presumably ``BaseDatasetsCLICommand``) and many locals are mangled —
    assignments to ``lowercase__`` followed by reads of the intended names
    (``train_parser``, ``abs_tfds_path``, ``out_line`` ...) — so the class
    cannot run as written; the comments describe intent only.
    """
    @staticmethod
    def A__ ( __lowercase : ArgumentParser ):
        # Register the ``convert`` sub-command and its two arguments.
        lowercase__ = parser.add_parser(
            "convert", help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.", )
        train_parser.add_argument(
            "--tfds_path", type=__lowercase, required=__lowercase, help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.", )
        train_parser.add_argument(
            "--datasets_directory", type=__lowercase, required=__lowercase, help="Path to the HuggingFace Datasets folder." )
        train_parser.set_defaults(func=__lowercase )
    def __init__( self : Tuple, __lowercase : str, __lowercase : str, *__lowercase : Tuple ):
        # Store source path and destination directory for run().
        lowercase__ = get_logger("datasets-cli/converting" )
        lowercase__ = tfds_path
        lowercase__ = datasets_directory
    def A__ ( self : Any ):
        """Convert every eligible .py file under the source path, writing the
        results (and any needed utility modules) into the destination."""
        # Resolve the source: a directory of scripts or a single script file.
        if os.path.isdir(self._tfds_path ):
            lowercase__ = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            lowercase__ = os.path.dirname(self._tfds_path )
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path." )
        lowercase__ = os.path.abspath(self._datasets_directory )
        self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
        lowercase__ = []
        lowercase__ = []
        lowercase__ = {}
        if os.path.isdir(self._tfds_path ):
            lowercase__ = os.listdir(__lowercase )
        else:
            lowercase__ = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(F'''Looking at file {f_name}''' )
            lowercase__ = os.path.join(__lowercase, __lowercase )
            lowercase__ = os.path.join(__lowercase, __lowercase )
            # Skip non-Python files, package inits and tests.
            if not os.path.isfile(__lowercase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file" )
                continue
            with open(__lowercase, encoding="utf-8" ) as f:
                lowercase__ = f.readlines()
            lowercase__ = []
            lowercase__ = False
            lowercase__ = False
            lowercase__ = []
            # Line-by-line translation of the TFDS script.
            for line in lines:
                lowercase__ = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    lowercase__ = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    lowercase__ = ""
                    continue
                elif "from absl import logging" in out_line:
                    lowercase__ = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    lowercase__ = out_line.replace("getLogger", "get_logger" )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    # Symbols with no auto-conversion: keep the line, wrapped
                    # in conflict-style markers for manual follow-up.
                    lowercase__ = True
                    lowercase__ = list(filter(lambda __lowercase : e in out_line, __lowercase ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(__lowercase ) + "\n" )
                    out_lines.append(__lowercase )
                    out_lines.append(__lowercase )
                    continue
                else:
                    # Apply the ordered regex substitution table.
                    for pattern, replacement in TO_CONVERT:
                        lowercase__ = re.sub(__lowercase, __lowercase, __lowercase )
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    lowercase__ = re.match(R"from\stensorflow_datasets.*import\s([^\.\r\n]+)", __lowercase )
                    tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) )
                    lowercase__ = "from . import " + match.group(1 )
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(F'''Error converting {out_line.strip()}''' )
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    lowercase__ = True
                out_lines.append(__lowercase )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                lowercase__ = f_name.replace(".py", "" )
                lowercase__ = os.path.join(__lowercase, __lowercase )
                lowercase__ = os.path.join(__lowercase, __lowercase )
                os.makedirs(__lowercase, exist_ok=__lowercase )
                self._logger.info(F'''Adding directory {output_dir}''' )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(__lowercase )
            if needs_manual_update:
                with_manual_update.append(__lowercase )
            with open(__lowercase, "w", encoding="utf-8" ) as f:
                f.writelines(__lowercase )
            self._logger.info(F'''Converted in {output_file}''' )
        # Copy shared utility files next to the builders that import them.
        for utils_file in utils_files:
            try:
                lowercase__ = os.path.basename(__lowercase )
                lowercase__ = imports_to_builder_map[f_name.replace(".py", "" )]
                self._logger.info(F'''Moving {dest_folder} to {utils_file}''' )
                shutil.copy(__lowercase, __lowercase )
            except KeyError:
                self._logger.error(F'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
| 37
| 0
|
from ..utils import DummyObject, requires_backends
class _snake_case ( metaclass=lowercase__):
    """Import-guard placeholder: any attempt to instantiate or construct this
    pipeline class raises unless ``transformers``, ``torch`` and ``note_seq``
    are all installed.

    NOTE(review): the metaclass name ``lowercase__`` is not defined in this
    fragment — presumably ``DummyObject`` imported above; confirm.
    """
    # Backends that must be importable before this class can be used.
    UpperCamelCase__ : Union[str, Any] =["""transformers""", """torch""", """note_seq"""]
    def __init__( self : List[str], *__lowercase : List[Any], **__lowercase : Optional[Any] ):
        requires_backends(self, ["transformers", "torch", "note_seq"] )
    @classmethod
    def A__ ( cls : Optional[int], *__lowercase : List[Any], **__lowercase : Tuple ):
        # Guards the ``from_config`` style constructor.
        requires_backends(cls, ["transformers", "torch", "note_seq"] )
    @classmethod
    def A__ ( cls : str, *__lowercase : str, **__lowercase : Any ):
        # Guards the ``from_pretrained`` style constructor.
        requires_backends(cls, ["transformers", "torch", "note_seq"] )
| 714
|
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
# Module-level logger for the LED fast tokenizer.
lowercase_ = logging.get_logger(__name__)
# File names under which the tokenizer vocabulary artifacts are stored.
lowercase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# Download locations of the pretrained vocabulary files per checkpoint.
lowercase_ = {
    """vocab_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
    },
    """merges_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
    },
    """tokenizer_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
    },
}
# Maximum model input length (in tokens) per pretrained checkpoint.
lowercase_ = {
    """allenai/led-base-16384""": 1_6384,
}
class _snake_case ( lowercase__):
    """Fast (Rust-backed) tokenizer for LED, derived from the BART tokenizer,
    with extra padding support for ``global_attention_mask``.

    NOTE(review): the base name ``lowercase__`` is undefined in this fragment
    (presumably ``PreTrainedTokenizerFast``), the ``__init__`` parameters all
    share the mangled name ``__lowercase`` (a SyntaxError) and locals are
    assigned to the throwaway ``lowercase__`` while later lines read the
    intended names (``pre_tok_state``, ``state`` ...) — the class cannot run
    as written; the comments describe intent only.
    """
    # Standard tokenizer wiring: file names, pretrained maps, slow-tokenizer
    # counterpart and model input names.
    UpperCamelCase__ : int =VOCAB_FILES_NAMES
    UpperCamelCase__ : Any =PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase__ : Dict =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase__ : List[Any] =LEDTokenizer
    UpperCamelCase__ : Tuple =["""input_ids""", """attention_mask"""]
    def __init__( self : Optional[Any], __lowercase : Optional[Any]=None, __lowercase : Dict=None, __lowercase : Tuple=None, __lowercase : Union[str, Any]="replace", __lowercase : Tuple="<s>", __lowercase : Optional[Any]="</s>", __lowercase : Tuple="</s>", __lowercase : List[str]="<s>", __lowercase : Tuple="<unk>", __lowercase : Dict="<pad>", __lowercase : Dict="<mask>", __lowercase : Any=False, __lowercase : Any=True, **__lowercase : List[Any], ):
        super().__init__(
            __lowercase, __lowercase, tokenizer_file=__lowercase, errors=__lowercase, bos_token=__lowercase, eos_token=__lowercase, sep_token=__lowercase, cls_token=__lowercase, unk_token=__lowercase, pad_token=__lowercase, mask_token=__lowercase, add_prefix_space=__lowercase, trim_offsets=__lowercase, **__lowercase, )
        # Sync the backend pre-tokenizer's add_prefix_space with the argument.
        lowercase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space", __lowercase ) != add_prefix_space:
            lowercase__ = getattr(__lowercase, pre_tok_state.pop("type" ) )
            lowercase__ = add_prefix_space
            lowercase__ = pre_tok_class(**__lowercase )
        lowercase__ = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        # Rebuild the backend post-processor if its add_prefix_space or
        # trim_offsets setting disagrees with the constructor arguments.
        lowercase__ = "post_processor"
        lowercase__ = getattr(self.backend_tokenizer, __lowercase, __lowercase )
        if tokenizer_component_instance:
            lowercase__ = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                lowercase__ = tuple(state["sep"] )
            if "cls" in state:
                lowercase__ = tuple(state["cls"] )
            lowercase__ = False
            if state.get("add_prefix_space", __lowercase ) != add_prefix_space:
                lowercase__ = add_prefix_space
                lowercase__ = True
            if state.get("trim_offsets", __lowercase ) != trim_offsets:
                lowercase__ = trim_offsets
                lowercase__ = True
            if changes_to_apply:
                lowercase__ = getattr(__lowercase, state.pop("type" ) )
                lowercase__ = component_class(**__lowercase )
                setattr(self.backend_tokenizer, __lowercase, __lowercase )
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def A__ ( self : str ):
        # mask_token getter; logs an error when the token was never set.
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def A__ ( self : Optional[int], __lowercase : Dict ):
        # mask_token setter; plain strings are wrapped into an AddedToken.
        lowercase__ = AddedToken(__lowercase, lstrip=__lowercase, rstrip=__lowercase ) if isinstance(__lowercase, __lowercase ) else value
        lowercase__ = value
    def A__ ( self : Any, *__lowercase : List[Any], **__lowercase : Optional[Any] ):
        # Batch encode; pre-tokenized input requires add_prefix_space=True.
        lowercase__ = kwargs.get("is_split_into_words", __lowercase )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                "to use it with pretokenized inputs." )
        return super()._batch_encode_plus(*__lowercase, **__lowercase )
    def A__ ( self : int, *__lowercase : Union[str, Any], **__lowercase : List[str] ):
        # Single-sequence encode; same add_prefix_space constraint as above.
        lowercase__ = kwargs.get("is_split_into_words", __lowercase )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                "to use it with pretokenized inputs." )
        return super()._encode_plus(*__lowercase, **__lowercase )
    def A__ ( self : Optional[Any], __lowercase : str, __lowercase : Optional[str] = None ):
        # Save the backend tokenizer model files; returns the written paths.
        lowercase__ = self._tokenizer.model.save(__lowercase, name=__lowercase )
        return tuple(__lowercase )
    def A__ ( self : List[str], __lowercase : int, __lowercase : Optional[int]=None ):
        # Build inputs with special tokens: <s> A </s> (</s> B </s> for pairs).
        lowercase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
    def A__ ( self : int, __lowercase : List[int], __lowercase : Optional[List[int]] = None ):
        # LED (like BART/RoBERTa) uses all-zero token type ids.
        lowercase__ = [self.sep_token_id]
        lowercase__ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def A__ ( self : Union[str, Any], __lowercase : Union[Dict[str, EncodedInput], BatchEncoding], __lowercase : Optional[int] = None, __lowercase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD, __lowercase : Optional[int] = None, __lowercase : Optional[bool] = None, ):
        # Extend the base padding so global_attention_mask is padded too.
        lowercase__ = super()._pad(
            encoded_inputs=__lowercase, max_length=__lowercase, padding_strategy=__lowercase, pad_to_multiple_of=__lowercase, return_attention_mask=__lowercase, )
        # Load from model defaults
        if return_attention_mask is None:
            lowercase__ = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            lowercase__ = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            lowercase__ = len(encoded_inputs["global_attention_mask"] ) != len(__lowercase )
            if needs_to_be_padded:
                lowercase__ = len(__lowercase ) - len(encoded_inputs["global_attention_mask"] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    lowercase__ = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    lowercase__ = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
        return encoded_inputs
| 37
| 0
|
def __lowerCAmelCase ( ):
    """Project Euler 40: product of the digits d1 * d10 * d100 * d1000 *
    d10000 * d100000 * d1000000 of Champernowne's constant
    (0.123456789101112...).

    Returns:
        int: the digit product (210).
    """
    # Concatenate decimal representations of 1, 2, 3, ... until at least one
    # million digits are available. (The original appended one million
    # *numbers* — nearly seven million digits — and its mangled locals raised
    # NameError before computing anything.)
    pieces = []
    total_len = 0
    index = 1
    while total_len < 1_000_000:
        chunk = str(index )
        pieces.append(chunk )
        total_len += len(chunk )
        index += 1
    constant = "".join(pieces )
    # d_k is 1-indexed, hence the -1 when indexing the joined string.
    product = 1
    for position in (1, 10, 100, 1_000, 10_000, 100_000, 1_000_000):
        product *= int(constant[position - 1] )
    return product


if __name__ == "__main__":
    # The original guard called an undefined ``solution``; call the entry
    # point defined above.
    print(__lowerCAmelCase() )
| 715
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __lowerCAmelCase ( ):
    """Entry point of the ``accelerate`` CLI: build the top-level parser,
    register every sub-command, parse argv and dispatch to the selected
    command's ``func``.

    Exits with status 1 (after printing the help text) when no sub-command
    was selected.
    """
    # The original assigned parser/args to a throwaway name and then read the
    # undefined names ``parser``/``args``; restore working locals. The
    # original ``allow_abbrev`` argument referenced an undefined name —
    # presumably False (abbreviated flags disabled); confirm upstream.
    parser = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=False )
    subparsers = parser.add_subparsers(help="accelerate command helpers" )
    # Register commands
    get_config_parser(subparsers=subparsers )
    env_command_parser(subparsers=subparsers )
    launch_command_parser(subparsers=subparsers )
    tpu_command_parser(subparsers=subparsers )
    test_command_parser(subparsers=subparsers )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , "func" ):
        parser.print_help()
        exit(1 )
    # Run
    args.func(args )


if __name__ == "__main__":
    # The original guard called an undefined ``main``; call the entry point.
    __lowerCAmelCase()
| 37
| 0
|
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    """Return the minimum total cost of merging all files into one (optimal
    merge pattern): repeatedly merge the two currently-smallest files, where
    each merge costs the size of the merged result.

    The original body assigned every local to a throwaway name and then read
    the unassigned names (``files``, ``temp``, ``min_index``,
    ``optimal_merge_cost``), so it raised NameError; this restores the greedy.

    Args:
        SCREAMING_SNAKE_CASE_ (list[int]): file sizes. The list is consumed
            (mutated) during the computation.

    Returns:
        int: minimal total merge cost; 0 for zero or one file.
    """
    files = SCREAMING_SNAKE_CASE_
    optimal_merge_cost = 0
    while len(files ) > 1:
        temp = 0
        # Remove the two smallest files and account for merging them.
        for _ in range(2 ):
            min_index = files.index(min(files ) )
            temp += files[min_index]
            files.pop(min_index )
        # The merged file goes back into the pool.
        files.append(temp )
        optimal_merge_cost += temp
    return optimal_merge_cost


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 716
|
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class _snake_case ( unittest.TestCase):
    """Container for the DonutImageProcessor test hyper-parameters.

    NOTE(review): in the original file every constructor argument carried the
    same mangled name ``__lowercase`` (a SyntaxError) and each value was
    assigned to a throwaway local; the parameter names below are reconstructed
    from the attribute reads in ``A__`` and the positional defaults.
    """

    def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_thumbnail=True, do_align_axis=False, do_pad=True, do_normalize=True, image_mean=None, image_std=None, ):
        # Mutable-default fix: the normalization statistics default to fresh
        # lists per instance instead of shared list literals.
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        # Default target size matches the checked-in Donut processor tests.
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def A__ ( self ):
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class _snake_case ( lowercase__ , unittest.TestCase):
    """Test suite for ``DonutImageProcessor``.

    NOTE(review): machine-mangled source — the base ``lowercase__`` is
    undefined (presumably the ``ImageProcessingSavingTestMixin`` imported
    above), every method is named ``A__`` (later defs shadow earlier ones),
    locals are discarded into ``lowercase__``, and ``__lowercase`` references
    get name-mangled inside the class.  Code left byte-identical; comments only.
    """

    # Image-processor class under test (None when vision deps are absent).
    UpperCamelCase__ : Optional[int] =DonutImageProcessor if is_vision_available() else None

    def A__ ( self : str ):
        # setUp-equivalent: build the shared tester fixture.
        # NOTE(review): result discarded into a throwaway local — the original
        # presumably assigned ``self.image_processor_tester``.
        lowercase__ = DonutImageProcessingTester(self )

    @property
    def A__ ( self : List[str] ):
        # Kwargs dict used to instantiate the image processor.
        return self.image_processor_tester.prepare_image_processor_dict()

    def A__ ( self : Optional[Any] ):
        # The processor must expose every configurable property.
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__lowercase, "do_resize" ) )
        self.assertTrue(hasattr(__lowercase, "size" ) )
        self.assertTrue(hasattr(__lowercase, "do_thumbnail" ) )
        self.assertTrue(hasattr(__lowercase, "do_align_long_axis" ) )
        self.assertTrue(hasattr(__lowercase, "do_pad" ) )
        self.assertTrue(hasattr(__lowercase, "do_normalize" ) )
        self.assertTrue(hasattr(__lowercase, "image_mean" ) )
        self.assertTrue(hasattr(__lowercase, "image_std" ) )

    def A__ ( self : str ):
        # ``from_dict`` should accept int / tuple size overrides and normalize
        # them into a {"height", "width"} dict.
        lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size, {"height": 18, "width": 20} )
        lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=42 )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42} )
        # Previous config had dimensions in (width, height) order
        lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84) )
        self.assertEqual(image_processor.size, {"height": 84, "width": 42} )

    def A__ ( self : List[str] ):
        # Intentionally empty placeholder test.
        pass

    @is_flaky()
    def A__ ( self : Dict ):
        # PIL input path: output must be a fixed-size 4D pixel-value tensor.
        # Initialize image_processing
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, Image.Image )
        # Test not batched input
        lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
        # Test batched
        lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )

    @is_flaky()
    def A__ ( self : Optional[Any] ):
        # numpy input path: same shape contract as the PIL path.
        # Initialize image_processing
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, numpify=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, np.ndarray )
        # Test not batched input
        lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
        # Test batched
        lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )

    @is_flaky()
    def A__ ( self : Tuple ):
        # torch input path: same shape contract as the PIL path.
        # Initialize image_processing
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, torchify=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, torch.Tensor )
        # Test not batched input
        lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
        # Test batched
        lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
| 37
| 0
|
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    """Config holder that builds the kwargs for the DPT image-processor tests.

    NOTE(review): the degraded source declared every parameter as ``__lowercase``
    (duplicate argument names — a SyntaxError).  Parameter and class/method
    names are reconstructed from the attribute assignments and from the call
    sites later in this file (``DPTImageProcessingTester(self)`` /
    ``prepare_image_processor_dict()``).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        # Square 18x18 default resize target.
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        # Avoid mutable list defaults: fall back to 0.5 per channel.
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


# Keep the mangled name resolvable for any remaining internal references.
_snake_case = DPTImageProcessingTester
@require_torch
@require_vision
class _snake_case ( lowercase__ , unittest.TestCase):
    """Test suite for ``DPTImageProcessor``.

    NOTE(review): machine-mangled source — the base ``lowercase__`` is
    undefined (presumably the ``ImageProcessingSavingTestMixin`` imported
    above), every method is named ``A__`` (later defs shadow earlier ones) and
    locals are discarded into ``lowercase__``.  Code left byte-identical;
    comments only.
    """

    # Image-processor class under test (None when vision deps are absent).
    UpperCamelCase__ : Optional[Any] =DPTImageProcessor if is_vision_available() else None

    def A__ ( self : Union[str, Any] ):
        # setUp-equivalent: build the shared tester fixture.
        # NOTE(review): result discarded into a throwaway local — the original
        # presumably assigned ``self.image_processor_tester``.
        lowercase__ = DPTImageProcessingTester(self )

    @property
    def A__ ( self : Any ):
        # Kwargs dict used to instantiate the image processor.
        return self.image_processor_tester.prepare_image_processor_dict()

    def A__ ( self : List[Any] ):
        # The processor must expose every configurable property.
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__lowercase, "image_mean" ) )
        self.assertTrue(hasattr(__lowercase, "image_std" ) )
        self.assertTrue(hasattr(__lowercase, "do_normalize" ) )
        self.assertTrue(hasattr(__lowercase, "do_resize" ) )
        self.assertTrue(hasattr(__lowercase, "size" ) )

    def A__ ( self : Tuple ):
        # ``from_dict`` should normalize an int size override into a dict.
        lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size, {"height": 18, "width": 18} )
        lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=42 )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42} )

    def A__ ( self : Tuple ):
        # PIL input path: output must be a fixed-size 4D pixel-value tensor.
        # Initialize image_processing
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, Image.Image )
        # Test not batched input
        lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
        # Test batched
        lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )

    def A__ ( self : Optional[Any] ):
        # numpy input path: same shape contract as the PIL path.
        # Initialize image_processing
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, numpify=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, np.ndarray )
        # Test not batched input
        lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
        # Test batched
        lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )

    def A__ ( self : Optional[int] ):
        # torch input path: same shape contract as the PIL path.
        # Initialize image_processing
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, torchify=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, torch.Tensor )
        # Test not batched input
        lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
        # Test batched
        lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
| 717
|
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _snake_case ( lowercase__):
def A__ ( self : Optional[Any], __lowercase : str ):
with open(__lowercase, encoding="utf-8" ) as input_file:
lowercase__ = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
lowercase__ = input_file.read()
lowercase__ = regexp.search(__lowercase )
return match
def A__ ( self : str, __lowercase : str ):
with open(__lowercase, encoding="utf-8" ) as input_file:
lowercase__ = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL )
lowercase__ = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
lowercase__ = regexp.finditer(__lowercase )
lowercase__ = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def A__ ( self : Union[str, Any] ):
lowercase__ = Path("./datasets" )
lowercase__ = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(__lowercase ) ):
raise AssertionError(F'''open(...) must use utf-8 encoding in {dataset}''' )
def A__ ( self : Union[str, Any] ):
lowercase__ = Path("./datasets" )
lowercase__ = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_print_statements(str(__lowercase ) ):
raise AssertionError(F'''print statement found in {dataset}. Use datasets.logger/logging instead.''' )
| 37
| 0
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class _snake_case :
    """Builds XLM configs and dummy inputs for the model tests below.

    NOTE(review): machine-mangled source — most locals are discarded into the
    shared name ``lowercase__`` (so only the last binding survives), parameters
    are duplicated as ``__lowercase`` (a SyntaxError as written), and every
    method is named ``A__`` so later defs shadow earlier ones.  The original
    method names can be inferred from the callers in the test class below
    (``prepare_config_and_inputs``, ``get_config``, ``create_and_check_xlm_*``).
    Code left byte-identical; comments only.
    """

    def __init__( self : Any, __lowercase : Union[str, Any], __lowercase : List[Any]=13, __lowercase : Tuple=7, __lowercase : List[Any]=True, __lowercase : Any=True, __lowercase : str=True, __lowercase : str=True, __lowercase : Union[str, Any]=True, __lowercase : List[Any]=False, __lowercase : List[Any]=False, __lowercase : List[str]=False, __lowercase : Optional[Any]=2, __lowercase : Any=99, __lowercase : Optional[Any]=0, __lowercase : Any=32, __lowercase : List[Any]=5, __lowercase : List[str]=4, __lowercase : Tuple=0.1, __lowercase : int=0.1, __lowercase : str=512, __lowercase : List[Any]=2, __lowercase : List[Any]=0.02, __lowercase : List[Any]=2, __lowercase : Any=4, __lowercase : str="last", __lowercase : Optional[int]=True, __lowercase : Any=None, __lowercase : Tuple=0, ):
        # Store the test hyper-parameters.  The right-hand-side identifiers
        # below are the intended parameter names that the mangling destroyed.
        lowercase__ = parent
        lowercase__ = batch_size
        lowercase__ = seq_length
        lowercase__ = is_training
        lowercase__ = use_input_lengths
        lowercase__ = use_token_type_ids
        lowercase__ = use_labels
        lowercase__ = gelu_activation
        lowercase__ = sinusoidal_embeddings
        lowercase__ = causal
        lowercase__ = asm
        lowercase__ = n_langs
        lowercase__ = vocab_size
        lowercase__ = n_special
        lowercase__ = hidden_size
        lowercase__ = num_hidden_layers
        lowercase__ = num_attention_heads
        lowercase__ = hidden_dropout_prob
        lowercase__ = attention_probs_dropout_prob
        lowercase__ = max_position_embeddings
        lowercase__ = type_sequence_label_size
        lowercase__ = initializer_range
        lowercase__ = num_labels
        lowercase__ = num_choices
        lowercase__ = summary_type
        lowercase__ = use_proj
        lowercase__ = scope
        lowercase__ = bos_token_id

    def A__ ( self : int ):
        # prepare_config_and_inputs: random ids/masks/labels plus a config.
        lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
        lowercase__ = None
        if self.use_input_lengths:
            lowercase__ = (
                ids_tensor([self.batch_size], vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length
        lowercase__ = None
        if self.use_token_type_ids:
            lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.n_langs )
        lowercase__ = None
        lowercase__ = None
        lowercase__ = None
        if self.use_labels:
            lowercase__ = ids_tensor([self.batch_size], self.type_sequence_label_size )
            lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            lowercase__ = ids_tensor([self.batch_size], 2 ).float()
            lowercase__ = ids_tensor([self.batch_size], self.num_choices )
        lowercase__ = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def A__ ( self : int ):
        # get_config: XLMConfig mirroring the tester's hyper-parameters.
        return XLMConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, num_labels=self.num_labels, bos_token_id=self.bos_token_id, )

    def A__ ( self : List[str], __lowercase : Optional[int], __lowercase : int, __lowercase : str, __lowercase : Union[str, Any], __lowercase : List[str], __lowercase : str, __lowercase : Tuple, __lowercase : Dict, __lowercase : int, ):
        # create_and_check_xlm_model: base model forward, hidden-state shape.
        lowercase__ = XLMModel(config=__lowercase )
        model.to(__lowercase )
        model.eval()
        lowercase__ = model(__lowercase, lengths=__lowercase, langs=__lowercase )
        lowercase__ = model(__lowercase, langs=__lowercase )
        lowercase__ = model(__lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )

    def A__ ( self : Tuple, __lowercase : Dict, __lowercase : Optional[int], __lowercase : Tuple, __lowercase : List[Any], __lowercase : List[Any], __lowercase : Any, __lowercase : Dict, __lowercase : Optional[Any], __lowercase : Dict, ):
        # create_and_check_xlm_lm_head: LM head loss + logits shapes.
        lowercase__ = XLMWithLMHeadModel(__lowercase )
        model.to(__lowercase )
        model.eval()
        lowercase__ = model(__lowercase, token_type_ids=__lowercase, labels=__lowercase )
        self.parent.assertEqual(result.loss.shape, () )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )

    def A__ ( self : Optional[Any], __lowercase : Tuple, __lowercase : List[str], __lowercase : str, __lowercase : List[str], __lowercase : Tuple, __lowercase : List[str], __lowercase : Any, __lowercase : Union[str, Any], __lowercase : int, ):
        # create_and_check_xlm_simple_qa: span start/end logit shapes.
        lowercase__ = XLMForQuestionAnsweringSimple(__lowercase )
        model.to(__lowercase )
        model.eval()
        lowercase__ = model(__lowercase )
        lowercase__ = model(__lowercase, start_positions=__lowercase, end_positions=__lowercase )
        lowercase__ = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )

    def A__ ( self : Optional[int], __lowercase : str, __lowercase : Any, __lowercase : Union[str, Any], __lowercase : Tuple, __lowercase : str, __lowercase : Any, __lowercase : Dict, __lowercase : Union[str, Any], __lowercase : Any, ):
        # create_and_check_xlm_qa: beam-search QA head with top-k outputs.
        lowercase__ = XLMForQuestionAnswering(__lowercase )
        model.to(__lowercase )
        model.eval()
        lowercase__ = model(__lowercase )
        lowercase__ = model(
            __lowercase, start_positions=__lowercase, end_positions=__lowercase, cls_index=__lowercase, is_impossible=__lowercase, p_mask=__lowercase, )
        lowercase__ = model(
            __lowercase, start_positions=__lowercase, end_positions=__lowercase, cls_index=__lowercase, is_impossible=__lowercase, )
        ((lowercase__ ) , ) = result_with_labels.to_tuple()
        lowercase__ = model(__lowercase, start_positions=__lowercase, end_positions=__lowercase )
        ((lowercase__ ) , ) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, () )
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,) )

    def A__ ( self : Optional[int], __lowercase : int, __lowercase : Optional[Any], __lowercase : Tuple, __lowercase : List[Any], __lowercase : Tuple, __lowercase : Dict, __lowercase : Dict, __lowercase : Optional[int], __lowercase : Any, ):
        # create_and_check_xlm_sequence_classif: sequence-level logits.
        lowercase__ = XLMForSequenceClassification(__lowercase )
        model.to(__lowercase )
        model.eval()
        lowercase__ = model(__lowercase )
        lowercase__ = model(__lowercase, labels=__lowercase )
        self.parent.assertEqual(result.loss.shape, () )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )

    def A__ ( self : Optional[int], __lowercase : Tuple, __lowercase : Dict, __lowercase : Tuple, __lowercase : Optional[Any], __lowercase : Union[str, Any], __lowercase : int, __lowercase : Optional[Any], __lowercase : int, __lowercase : Optional[int], ):
        # create_and_check_xlm_token_classif: per-token logits.
        lowercase__ = self.num_labels
        lowercase__ = XLMForTokenClassification(__lowercase )
        model.to(__lowercase )
        model.eval()
        lowercase__ = model(__lowercase, attention_mask=__lowercase, labels=__lowercase )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )

    def A__ ( self : List[Any], __lowercase : Optional[Any], __lowercase : List[Any], __lowercase : int, __lowercase : List[Any], __lowercase : List[str], __lowercase : Dict, __lowercase : Union[str, Any], __lowercase : int, __lowercase : Dict, ):
        # create_and_check_xlm_for_multiple_choice: inputs expanded per choice.
        lowercase__ = self.num_choices
        lowercase__ = XLMForMultipleChoice(config=__lowercase )
        model.to(__lowercase )
        model.eval()
        lowercase__ = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        lowercase__ = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        lowercase__ = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        lowercase__ = model(
            __lowercase, attention_mask=__lowercase, token_type_ids=__lowercase, labels=__lowercase, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )

    def A__ ( self : Optional[int] ):
        # prepare_config_and_inputs_for_common: unpack and build inputs_dict.
        lowercase__ = self.prepare_config_and_inputs()
        (
            (
                lowercase__
            ) , (
                lowercase__
            ) , (
                lowercase__
            ) , (
                lowercase__
            ) , (
                lowercase__
            ) , (
                lowercase__
            ) , (
                lowercase__
            ) , (
                lowercase__
            ) , (
                lowercase__
            ) ,
        ) = config_and_inputs
        lowercase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class _snake_case ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase):
    """Aggregate test suite for the XLM model family.

    NOTE(review): machine-mangled source — the three ``lowercase__`` bases are
    undefined (presumably ModelTesterMixin / GenerationTesterMixin /
    PipelineTesterMixin imported above), every method is named ``A__`` (later
    defs shadow earlier ones), several defs duplicate the parameter name
    ``__lowercase`` (a SyntaxError as written), and locals are discarded into
    ``lowercase__``.  Code left byte-identical; comments only.
    """

    # All XLM model classes exercised by the common model tests.
    UpperCamelCase__ : Optional[int] =(
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    UpperCamelCase__ : Union[str, Any] =(
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    # Mapping of pipeline task name -> model class for pipeline tests.
    UpperCamelCase__ : Union[str, Any] =(
        {
            """feature-extraction""": XLMModel,
            """fill-mask""": XLMWithLMHeadModel,
            """question-answering""": XLMForQuestionAnsweringSimple,
            """text-classification""": XLMForSequenceClassification,
            """text-generation""": XLMWithLMHeadModel,
            """token-classification""": XLMForTokenClassification,
            """zero-shot""": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def A__ ( self : Tuple, __lowercase : str, __lowercase : Optional[Any], __lowercase : Any, __lowercase : Optional[Any], __lowercase : Tuple ):
        # Pipeline-test skip rule: QA pipeline tests fail with slow tokenizers.
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast" )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def A__ ( self : str, __lowercase : str, __lowercase : Tuple, __lowercase : Optional[int]=False ):
        # _prepare_for_class override: adds dummy start/end position labels
        # for the beam-search QA model.
        lowercase__ = super()._prepare_for_class(__lowercase, __lowercase, return_labels=__lowercase )
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                lowercase__ = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=__lowercase )
                lowercase__ = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=__lowercase )
        return inputs_dict

    def A__ ( self : Any ):
        # setUp-equivalent: tester fixture + config tester.
        lowercase__ = XLMModelTester(self )
        lowercase__ = ConfigTester(self, config_class=__lowercase, emb_dim=37 )

    def A__ ( self : Tuple ):
        self.config_tester.run_common_tests()

    def A__ ( self : Union[str, Any] ):
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*__lowercase )

    def A__ ( self : List[str] ):
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*__lowercase )

    def A__ ( self : Tuple ):
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*__lowercase )

    def A__ ( self : Any ):
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*__lowercase )

    def A__ ( self : Union[str, Any] ):
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*__lowercase )

    def A__ ( self : Optional[int] ):
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*__lowercase )

    def A__ ( self : str ):
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*__lowercase )

    def A__ ( self : Optional[int], __lowercase : Optional[int], __lowercase : List[str], __lowercase : int, __lowercase : str, __lowercase : int, __lowercase : Any=False, __lowercase : Dict=1 ):
        # Generation-test hook: check attention shapes per generated token,
        # accounting for the PAD dummy token appended at each step.
        self.assertIsInstance(__lowercase, __lowercase )
        self.assertListEqual(
            [isinstance(__lowercase, __lowercase ) for iter_attentions in attentions], [True] * len(__lowercase ) )
        self.assertEqual(len(__lowercase ), (max_length - min_length) * num_beam_groups )
        for idx, iter_attentions in enumerate(__lowercase ):
            # adds PAD dummy token
            lowercase__ = min_length + idx + 1
            lowercase__ = min_length + idx + 1
            lowercase__ = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(__lowercase ) )

    def A__ ( self : Optional[Any], __lowercase : str, __lowercase : List[str], __lowercase : Optional[int], __lowercase : List[str], __lowercase : Tuple, __lowercase : Optional[int]=False, __lowercase : int=1 ):
        # Generation-test hook: check hidden-state shapes per generated token.
        self.assertIsInstance(__lowercase, __lowercase )
        self.assertListEqual(
            [isinstance(__lowercase, __lowercase ) for iter_hidden_states in hidden_states], [True] * len(__lowercase ), )
        self.assertEqual(len(__lowercase ), (max_length - min_length) * num_beam_groups )
        for idx, iter_hidden_states in enumerate(__lowercase ):
            # adds PAD dummy token
            lowercase__ = min_length + idx + 1
            lowercase__ = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states], [expected_shape] * len(__lowercase ), )
        pass

    @slow
    def A__ ( self : Dict ):
        # Smoke test: load the first pretrained checkpoint from the hub.
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase__ = XLMModel.from_pretrained(__lowercase )
            self.assertIsNotNone(__lowercase )
@require_torch
class _snake_case ( unittest.TestCase):
    """Slow integration test: greedy generation with the xlm-mlm-en-2048 checkpoint.

    NOTE(review): mangled source — locals are discarded into ``lowercase__``
    and the method name was collapsed to ``A__``.  Code left byte-identical;
    comments only.
    """

    @slow
    def A__ ( self : List[str] ):
        # Load the pretrained LM-head model and move it to the test device.
        lowercase__ = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048" )
        model.to(__lowercase )
        lowercase__ = torch.tensor([[14, 447]], dtype=torch.long, device=__lowercase )  # the president
        # Expected greedy continuation: the prompt tokens repeated ten times.
        lowercase__ = [
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        lowercase__ = model.generate(__lowercase, do_sample=__lowercase )
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), __lowercase )
| 718
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Structure describing the public symbols of each submodule; consumed by
# ``_LazyModule`` so heavy imports happen only on first attribute access.
# NOTE(review): the degraded source bound everything to ``lowercase_`` while
# the ``_LazyModule(...)`` call below referenced ``_import_structure`` — the
# canonical name is restored here.
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: simply don't advertise the modeling symbols.
    pass
else:
    # Register the torch-backed model classes with the lazy structure
    # (the degraded source assigned this list to a throwaway name).
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy.  NOTE(review): the degraded
    # source dropped the ``sys.modules`` assignment (the preceding
    # ``import sys`` strongly suggests it) — restored per the standard
    # lazy-module idiom used throughout this codebase.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 37
| 0
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module-level logger.
lowercase_ = logging.get_logger(__name__)
# NOTE(review): every constant below was mangled to the same identifier
# ``lowercase_`` — each assignment shadows the previous one.  Judging by the
# class attributes of the tokenizer below, the originals were
# VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP and
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES — confirm before renaming.
# On-disk vocabulary file names.
lowercase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all BART models at https://huggingface.co/models?filter=bart
# Checkpoint name -> hosted vocab/merges URLs.
lowercase_ = {
    """vocab_file""": {
        """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
        """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
        """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
        """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
        """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
        """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
    },
    """merges_file""": {
        """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
        """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
        """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
        """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
        """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
        """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
    },
}
# Maximum positional-embedding length per checkpoint.
lowercase_ = {
    """facebook/bart-base""": 1024,
    """facebook/bart-large""": 1024,
    """facebook/bart-large-mnli""": 1024,
    """facebook/bart-large-cnn""": 1024,
    """facebook/bart-large-xsum""": 1024,
    """yjernite/bart_eli5""": 1024,
}
@lru_cache()
def bytes_to_unicode():
    """Return the byte -> unicode-character table used by byte-level BPE.

    Every byte 0-255 maps to a printable unicode character: the "nice"
    printable ranges map to themselves, every remaining byte to chr(256 + n).
    Cached because the table is a pure constant.

    NOTE(review): name restored from the mangled ``__lowerCAmelCase`` — the
    tokenizer below calls ``bytes_to_unicode()``; the degraded body also
    referenced undefined names (``SCREAMING_SNAKE_CASE_``, ``cs``).
    """
    bs = (
        list(range(ord("!" ), ord("~" ) + 1 ) ) + list(range(ord("¡" ), ord("¬" ) + 1 ) ) + list(range(ord("®" ), ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            # Unassigned byte: give it the next codepoint above 255.
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(c ) for c in cs]
    return dict(zip(bs, cs ) )
def get_pairs(word):
    """Return the set of adjacent symbol pairs occurring in *word*.

    *word* is any sequence of symbols (e.g. a string or tuple of BPE tokens);
    an empty pair-set is returned for a single-symbol word.

    NOTE(review): name reconstructed from the mangled ``__lowerCAmelCase`` —
    this is the standard byte-level-BPE helper; the degraded body referenced
    the undefined names ``pairs`` and ``prev_char`` it never bound.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs


# Preserve the (shadowed) mangled name for any remaining internal callers.
__lowerCAmelCase = get_pairs
class _snake_case ( lowercase__):
    """Byte-level BPE tokenizer in the BART/GPT-2 style.

    Loads a ``vocab.json`` token->id map and a rank-ordered ``merges.txt``,
    maps raw UTF-8 bytes to printable unicode via ``bytes_to_unicode`` and
    applies greedy lowest-rank BPE merges per pre-tokenized chunk.

    NOTE(review): throughout this class, assignments bind a local
    ``lowercase__`` while later code reads named attributes/locals
    (``self.encoder``, ``self.bpe_ranks``, ``pairs``, ``word``, ...); the
    real targets appear lost in an automated rename — confirm against the
    upstream source before relying on this file.
    """
    UpperCamelCase__ : Union[str, Any] =VOCAB_FILES_NAMES
    UpperCamelCase__ : List[Any] =PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase__ : str =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Tensor names this tokenizer produces for the model's forward pass.
    UpperCamelCase__ : List[str] =["""input_ids""", """attention_mask"""]
    # Builds the tokenizer: normalises special tokens to AddedToken, loads the
    # vocab/merges files and prepares the byte<->unicode tables and the
    # pre-tokenisation regex.
    def __init__( self : Optional[Any], __lowercase : Dict, __lowercase : Optional[int], __lowercase : List[Any]="replace", __lowercase : List[str]="<s>", __lowercase : Optional[Any]="</s>", __lowercase : int="</s>", __lowercase : Optional[Any]="<s>", __lowercase : str="<unk>", __lowercase : Tuple="<pad>", __lowercase : int="<mask>", __lowercase : Dict=False, **__lowercase : Optional[Any], ):
        # Plain-string special tokens are wrapped as AddedToken instances.
        lowercase__ = AddedToken(__lowercase, lstrip=__lowercase, rstrip=__lowercase ) if isinstance(__lowercase, __lowercase ) else bos_token
        lowercase__ = AddedToken(__lowercase, lstrip=__lowercase, rstrip=__lowercase ) if isinstance(__lowercase, __lowercase ) else eos_token
        lowercase__ = AddedToken(__lowercase, lstrip=__lowercase, rstrip=__lowercase ) if isinstance(__lowercase, __lowercase ) else sep_token
        lowercase__ = AddedToken(__lowercase, lstrip=__lowercase, rstrip=__lowercase ) if isinstance(__lowercase, __lowercase ) else cls_token
        lowercase__ = AddedToken(__lowercase, lstrip=__lowercase, rstrip=__lowercase ) if isinstance(__lowercase, __lowercase ) else unk_token
        lowercase__ = AddedToken(__lowercase, lstrip=__lowercase, rstrip=__lowercase ) if isinstance(__lowercase, __lowercase ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        lowercase__ = AddedToken(__lowercase, lstrip=__lowercase, rstrip=__lowercase ) if isinstance(__lowercase, __lowercase ) else mask_token
        super().__init__(
            errors=__lowercase, bos_token=__lowercase, eos_token=__lowercase, unk_token=__lowercase, sep_token=__lowercase, cls_token=__lowercase, pad_token=__lowercase, mask_token=__lowercase, add_prefix_space=__lowercase, **__lowercase, )
        with open(__lowercase, encoding="utf-8" ) as vocab_handle:
            lowercase__ = json.load(__lowercase )
        # Reverse map: id -> token string.
        lowercase__ = {v: k for k, v in self.encoder.items()}
        lowercase__ = errors  # how to handle errors in decoding
        lowercase__ = bytes_to_unicode()
        lowercase__ = {v: k for k, v in self.byte_encoder.items()}
        with open(__lowercase, encoding="utf-8" ) as merges_handle:
            # merges.txt: first line is a "#version" header, last split entry is empty.
            lowercase__ = merges_handle.read().split("\n" )[1:-1]
        lowercase__ = [tuple(merge.split() ) for merge in bpe_merges]
        # Merge-rank table: earlier merges.txt lines get lower (better) ranks.
        lowercase__ = dict(zip(__lowercase, range(len(__lowercase ) ) ) )
        lowercase__ = {}
        lowercase__ = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        lowercase__ = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
    # Size of the base vocabulary (excludes tokens added after loading).
    @property
    def A__ ( self : List[str] ):
        return len(self.encoder )
    # Full vocabulary including added tokens.
    def A__ ( self : str ):
        return dict(self.encoder, **self.added_tokens_encoder )
    # Applies BPE merges to one pre-tokenised chunk, memoising in self.cache.
    def A__ ( self : Optional[Any], __lowercase : Dict ):
        if token in self.cache:
            return self.cache[token]
        lowercase__ = tuple(__lowercase )
        lowercase__ = get_pairs(__lowercase )
        if not pairs:
            return token
        while True:
            # Pick the adjacent pair with the best (lowest) merge rank.
            lowercase__ = min(__lowercase, key=lambda __lowercase : self.bpe_ranks.get(__lowercase, float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            lowercase__ , lowercase__ = bigram
            lowercase__ = []
            lowercase__ = 0
            # Rebuild the word with every occurrence of (first, second) merged.
            while i < len(__lowercase ):
                try:
                    lowercase__ = word.index(__lowercase, __lowercase )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    lowercase__ = j
                if word[i] == first and i < len(__lowercase ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            lowercase__ = tuple(__lowercase )
            lowercase__ = new_word
            if len(__lowercase ) == 1:
                break
            else:
                lowercase__ = get_pairs(__lowercase )
        lowercase__ = " ".join(__lowercase )
        lowercase__ = word
        return word
    # Splits text with the pre-tokenisation regex, byte-encodes each chunk,
    # then BPE-merges it.
    def A__ ( self : Tuple, __lowercase : Dict ):
        lowercase__ = []
        for token in re.findall(self.pat, __lowercase ):
            lowercase__ = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowercase ).split(" " ) )
        return bpe_tokens
    # Token string -> vocab id, with unk-token fallback.
    def A__ ( self : Union[str, Any], __lowercase : Union[str, Any] ):
        return self.encoder.get(__lowercase, self.encoder.get(self.unk_token ) )
    # Vocab id -> token string.
    def A__ ( self : Optional[Any], __lowercase : int ):
        return self.decoder.get(__lowercase )
    # Joins BPE tokens and decodes the byte-level unicode back to real text.
    def A__ ( self : List[str], __lowercase : Union[str, Any] ):
        lowercase__ = "".join(__lowercase )
        lowercase__ = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8", errors=self.errors )
        return text
    # Writes vocab.json and merges.txt into save_directory; returns both paths.
    def A__ ( self : Tuple, __lowercase : str, __lowercase : Optional[str] = None ):
        if not os.path.isdir(__lowercase ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        lowercase__ = os.path.join(
            __lowercase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        lowercase__ = os.path.join(
            __lowercase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(__lowercase, "w", encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=__lowercase, ensure_ascii=__lowercase ) + "\n" )
        lowercase__ = 0
        with open(__lowercase, "w", encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            # NOTE(review): the lambda parameter is named `__lowercase` but the
            # body reads `kv[1]` — the sort key looks broken by a rename; confirm.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda __lowercase : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        " Please check that the tokenizer is not corrupted!" )
                    lowercase__ = token_index
                writer.write(" ".join(__lowercase ) + "\n" )
                index += 1
        return vocab_file, merge_file
    # BART format: <s> A </s> for one sequence, <s> A </s></s> B </s> for a pair.
    def A__ ( self : Optional[Any], __lowercase : List[int], __lowercase : Optional[List[int]] = None ):
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        lowercase__ = [self.cls_token_id]
        lowercase__ = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep
    # Mask with 1 at special-token positions, 0 at sequence-token positions.
    def A__ ( self : Any, __lowercase : List[int], __lowercase : Optional[List[int]] = None, __lowercase : bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__lowercase, token_ids_a=__lowercase, already_has_special_tokens=__lowercase )
        if token_ids_a is None:
            return [1] + ([0] * len(__lowercase )) + [1]
        return [1] + ([0] * len(__lowercase )) + [1, 1] + ([0] * len(__lowercase )) + [1]
    # Token-type ids are all zeros (this model does not use them).
    def A__ ( self : str, __lowercase : List[int], __lowercase : Optional[List[int]] = None ):
        lowercase__ = [self.sep_token_id]
        lowercase__ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    # Optionally prepends a space so the first word merges like a
    # mid-sentence word under byte-level BPE.
    def A__ ( self : Optional[Any], __lowercase : Any, __lowercase : Dict=False, **__lowercase : Union[str, Any] ):
        lowercase__ = kwargs.pop("add_prefix_space", self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(__lowercase ) > 0 and not text[0].isspace()):
            lowercase__ = " " + text
        return (text, kwargs)
| 719
|
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
# Conditional imports: torch and vision extras may be absent at test time.
if is_torch_available():
    import torch
    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    # NOTE(review): fallback flag with an obfuscated name — presumably stands
    # in for `is_torch_greater_or_equal_than_1_11`; confirm against callers.
    lowercase_ = False
if is_vision_available():
    from PIL import Image
    from transformers import PixaStructImageProcessor
class _snake_case ( unittest.TestCase):
    """Holds the shared configuration for the Pix2Struct image-processor tests.

    NOTE(review): every assignment in ``__init__`` binds a local
    ``lowercase__`` instead of an instance attribute, yet the methods below
    and the test classes read ``self.do_normalize``, ``self.patch_size``,
    ``self.max_patches`` etc. — the ``self.`` targets appear lost in an
    automated rename; confirm against the upstream source.
    """
    def __init__( self : List[Any], __lowercase : int, __lowercase : Optional[int]=7, __lowercase : List[str]=3, __lowercase : Tuple=18, __lowercase : List[Any]=30, __lowercase : Tuple=400, __lowercase : Any=None, __lowercase : Optional[int]=True, __lowercase : List[str]=True, __lowercase : Union[str, Any]=None, ):
        lowercase__ = size if size is not None else {"height": 20, "width": 20}
        lowercase__ = parent
        lowercase__ = batch_size
        lowercase__ = num_channels
        lowercase__ = image_size
        lowercase__ = min_resolution
        lowercase__ = max_resolution
        lowercase__ = size
        lowercase__ = do_normalize
        lowercase__ = do_convert_rgb
        # Patch budgets exercised by every shape test below.
        lowercase__ = [512, 1024, 2048, 4096]
        lowercase__ = patch_size if patch_size is not None else {"height": 16, "width": 16}
    # Kwargs dict used to construct the image processor under test.
    def A__ ( self : List[str] ):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
    # Downloads a fixed sample image (requires network access) as an RGB PIL image.
    def A__ ( self : Any ):
        lowercase__ = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        lowercase__ = Image.open(requests.get(__lowercase, stream=__lowercase ).raw ).convert("RGB" )
        return raw_image
# Entire suite is skipped when torch < 1.11, which the processor requires.
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class _snake_case ( lowercase__ , unittest.TestCase):
    """Pix2Struct image-processor tests for 3-channel inputs.

    Covers PIL, numpy and torch inputs, batched and unbatched, for every
    max-patch budget in the tester. The expected hidden dim of the flattened
    patches is patch_height * patch_width * num_channels + 2 (see the
    `expected_hidden_dim` expressions below).
    """
    UpperCamelCase__ : Any =PixaStructImageProcessor if is_vision_available() else None
    # NOTE(review): `PixaStructImageProcessingTester` is referenced here but
    # the tester class above was renamed to `_snake_case`; confirm.
    def A__ ( self : Any ):
        lowercase__ = PixaStructImageProcessingTester(self )
    @property
    def A__ ( self : Union[str, Any] ):
        return self.image_processor_tester.prepare_image_processor_dict()
    # The processor must expose its configuration attributes.
    def A__ ( self : Optional[Any] ):
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__lowercase, "do_normalize" ) )
        self.assertTrue(hasattr(__lowercase, "do_convert_rgb" ) )
    # Regression check on the mean of the flattened patches for a fixed image.
    def A__ ( self : Optional[int] ):
        lowercase__ = self.image_processor_tester.prepare_dummy_image()
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        lowercase__ = 2048
        lowercase__ = image_processor(__lowercase, return_tensors="pt", max_patches=__lowercase )
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606 ), atol=1e-3, rtol=1e-3 ) )
    # Shape checks for PIL inputs.
    def A__ ( self : Union[str, Any] ):
        # Initialize image_processor
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, Image.Image )
        # Test not batched input
        lowercase__ = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            lowercase__ = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim), )
            # Test batched
            lowercase__ = image_processor(
                __lowercase, return_tensors="pt", max_patches=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
    # VQA mode: calling without header_text must raise; with it, shapes match.
    def A__ ( self : int ):
        # Initialize image_processor
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, Image.Image )
        # Test not batched input
        lowercase__ = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        lowercase__ = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(__lowercase ):
                lowercase__ = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches
            lowercase__ = "Hello"
            lowercase__ = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=__lowercase, header_text=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim), )
            # Test batched
            lowercase__ = image_processor(
                __lowercase, return_tensors="pt", max_patches=__lowercase, header_text=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
    # Shape checks for numpy inputs.
    def A__ ( self : Tuple ):
        # Initialize image_processor
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, numpify=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, np.ndarray )
        lowercase__ = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            lowercase__ = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim), )
            # Test batched
            lowercase__ = image_processor(
                __lowercase, return_tensors="pt", max_patches=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
    # Shape checks for torch-tensor inputs.
    def A__ ( self : Any ):
        # Initialize image_processor
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, torchify=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, torch.Tensor )
        # Test not batched input
        lowercase__ = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            lowercase__ = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim), )
            # Test batched
            lowercase__ = image_processor(
                __lowercase, return_tensors="pt", max_patches=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
# Entire suite is skipped when torch < 1.11, which the processor requires.
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class _snake_case ( lowercase__ , unittest.TestCase):
    """Pix2Struct image-processor tests for 4-channel (e.g. RGBA) inputs.

    The tester is built with ``num_channels=4``; the expected hidden dim uses
    ``num_channels - 1`` because the processor converts inputs to RGB first.
    """
    UpperCamelCase__ : Optional[int] =PixaStructImageProcessor if is_vision_available() else None
    def A__ ( self : Optional[int] ):
        lowercase__ = PixaStructImageProcessingTester(self, num_channels=4 )
        # NOTE(review): bare `lowercase__ = 3` — presumably meant to set an
        # expected-channels attribute on the tester; confirm.
        lowercase__ = 3
    @property
    def A__ ( self : Union[str, Any] ):
        return self.image_processor_tester.prepare_image_processor_dict()
    # The processor must expose its configuration attributes.
    def A__ ( self : Dict ):
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__lowercase, "do_normalize" ) )
        self.assertTrue(hasattr(__lowercase, "do_convert_rgb" ) )
    # Shape checks for 4-channel PIL inputs after RGB conversion.
    def A__ ( self : Union[str, Any] ):
        # Initialize image_processor
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, Image.Image )
        # Test not batched input
        lowercase__ = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            lowercase__ = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim), )
            # Test batched
            lowercase__ = image_processor(
                __lowercase, return_tensors="pt", max_patches=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
| 37
| 0
|
'''simple docstring'''
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
lowercase__ = 0
for ch in input_str:
lowercase__ = ord(SCREAMING_SNAKE_CASE_ )
lowercase__ = pow(2 , SCREAMING_SNAKE_CASE_ )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 720
|
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase__ , lowercase__ = len(SCREAMING_SNAKE_CASE_ ), len(grid[0] )
if (
min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
lowercase__ = 0
count += depth_first_search(SCREAMING_SNAKE_CASE_ , row + 1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
count += depth_first_search(SCREAMING_SNAKE_CASE_ , row - 1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
count += depth_first_search(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , col + 1 , SCREAMING_SNAKE_CASE_ )
count += depth_first_search(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , col - 1 , SCREAMING_SNAKE_CASE_ )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 37
| 0
|
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _snake_case ( lowercase__):
    """Unit tests for the PNDM (pseudo numerical methods) diffusion scheduler.

    Exercises config round-trips through save/from_pretrained, the PRK and
    PLMS stepping phases, and regression values for full sampling loops.
    """
    UpperCamelCase__ : List[Any] =(PNDMScheduler,)
    UpperCamelCase__ : List[str] =(("""num_inference_steps""", 5_0),)
    # Default scheduler config; keyword overrides are merged in.
    def A__ ( self : Dict, **__lowercase : Dict ):
        lowercase__ = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**__lowercase )
        return config
    # Saving then reloading a scheduler must reproduce step_prk/step_plms
    # outputs bit-for-bit (within 1e-5), with config overrides applied.
    def A__ ( self : Optional[int], __lowercase : Optional[Any]=0, **__lowercase : Optional[int] ):
        lowercase__ = dict(self.forward_default_kwargs )
        lowercase__ = kwargs.pop("num_inference_steps", __lowercase )
        lowercase__ = self.dummy_sample
        lowercase__ = 0.1 * sample
        lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            lowercase__ = self.get_scheduler_config(**__lowercase )
            lowercase__ = scheduler_class(**__lowercase )
            scheduler.set_timesteps(__lowercase )
            # copy over dummy past residuals
            lowercase__ = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(__lowercase )
                lowercase__ = scheduler_class.from_pretrained(__lowercase )
                new_scheduler.set_timesteps(__lowercase )
                # copy over dummy past residuals
                lowercase__ = dummy_past_residuals[:]
            lowercase__ = scheduler.step_prk(__lowercase, __lowercase, __lowercase, **__lowercase ).prev_sample
            lowercase__ = new_scheduler.step_prk(__lowercase, __lowercase, __lowercase, **__lowercase ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            lowercase__ = scheduler.step_plms(__lowercase, __lowercase, __lowercase, **__lowercase ).prev_sample
            lowercase__ = new_scheduler.step_plms(__lowercase, __lowercase, __lowercase, **__lowercase ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    # Base-class hook intentionally disabled for this scheduler.
    def A__ ( self : Optional[Any] ):
        pass
    # Same round-trip check as above but with the default config.
    def A__ ( self : Union[str, Any], __lowercase : List[str]=0, **__lowercase : Any ):
        lowercase__ = dict(self.forward_default_kwargs )
        lowercase__ = kwargs.pop("num_inference_steps", __lowercase )
        lowercase__ = self.dummy_sample
        lowercase__ = 0.1 * sample
        lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            lowercase__ = self.get_scheduler_config()
            lowercase__ = scheduler_class(**__lowercase )
            scheduler.set_timesteps(__lowercase )
            # copy over dummy past residuals (must be after setting timesteps)
            lowercase__ = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(__lowercase )
                lowercase__ = scheduler_class.from_pretrained(__lowercase )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(__lowercase )
                # copy over dummy past residual (must be after setting timesteps)
                lowercase__ = dummy_past_residuals[:]
            lowercase__ = scheduler.step_prk(__lowercase, __lowercase, __lowercase, **__lowercase ).prev_sample
            lowercase__ = new_scheduler.step_prk(__lowercase, __lowercase, __lowercase, **__lowercase ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            lowercase__ = scheduler.step_plms(__lowercase, __lowercase, __lowercase, **__lowercase ).prev_sample
            lowercase__ = new_scheduler.step_plms(__lowercase, __lowercase, __lowercase, **__lowercase ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    # Runs a complete sampling loop (PRK warmup then PLMS) and returns the sample.
    def A__ ( self : Dict, **__lowercase : Any ):
        lowercase__ = self.scheduler_classes[0]
        lowercase__ = self.get_scheduler_config(**__lowercase )
        lowercase__ = scheduler_class(**__lowercase )
        lowercase__ = 10
        lowercase__ = self.dummy_model()
        lowercase__ = self.dummy_sample_deter
        scheduler.set_timesteps(__lowercase )
        for i, t in enumerate(scheduler.prk_timesteps ):
            lowercase__ = model(__lowercase, __lowercase )
            lowercase__ = scheduler.step_prk(__lowercase, __lowercase, __lowercase ).prev_sample
        for i, t in enumerate(scheduler.plms_timesteps ):
            lowercase__ = model(__lowercase, __lowercase )
            lowercase__ = scheduler.step_plms(__lowercase, __lowercase, __lowercase ).prev_sample
        return sample
    # step_prk/step_plms outputs must keep the sample's shape across timesteps.
    def A__ ( self : int ):
        lowercase__ = dict(self.forward_default_kwargs )
        lowercase__ = kwargs.pop("num_inference_steps", __lowercase )
        for scheduler_class in self.scheduler_classes:
            lowercase__ = self.get_scheduler_config()
            lowercase__ = scheduler_class(**__lowercase )
            lowercase__ = self.dummy_sample
            lowercase__ = 0.1 * sample
            if num_inference_steps is not None and hasattr(__lowercase, "set_timesteps" ):
                scheduler.set_timesteps(__lowercase )
            elif num_inference_steps is not None and not hasattr(__lowercase, "set_timesteps" ):
                lowercase__ = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            lowercase__ = dummy_past_residuals[:]
            lowercase__ = scheduler.step_prk(__lowercase, 0, __lowercase, **__lowercase ).prev_sample
            lowercase__ = scheduler.step_prk(__lowercase, 1, __lowercase, **__lowercase ).prev_sample
            self.assertEqual(output_a.shape, sample.shape )
            self.assertEqual(output_a.shape, output_a.shape )
            lowercase__ = scheduler.step_plms(__lowercase, 0, __lowercase, **__lowercase ).prev_sample
            lowercase__ = scheduler.step_plms(__lowercase, 1, __lowercase, **__lowercase ).prev_sample
            self.assertEqual(output_a.shape, sample.shape )
            self.assertEqual(output_a.shape, output_a.shape )
    # Config sweeps below: each check_over_configs re-runs the common checks.
    def A__ ( self : str ):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=__lowercase )
    # steps_offset=1 must shift the timestep schedule as pinned here.
    def A__ ( self : List[str] ):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=__lowercase )
        lowercase__ = self.scheduler_classes[0]
        lowercase__ = self.get_scheduler_config(steps_offset=1 )
        lowercase__ = scheduler_class(**__lowercase )
        scheduler.set_timesteps(10 )
        assert torch.equal(
            scheduler.timesteps, torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ), )
    def A__ ( self : str ):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02] ):
            self.check_over_configs(beta_start=__lowercase, beta_end=__lowercase )
    def A__ ( self : Tuple ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=__lowercase )
    def A__ ( self : Union[str, Any] ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=__lowercase )
    def A__ ( self : List[Any] ):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=__lowercase )
    def A__ ( self : int ):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=__lowercase )
    def A__ ( self : int ):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        lowercase__ = 27
        for scheduler_class in self.scheduler_classes:
            lowercase__ = self.dummy_sample
            lowercase__ = 0.1 * sample
            lowercase__ = self.get_scheduler_config()
            lowercase__ = scheduler_class(**__lowercase )
            scheduler.set_timesteps(__lowercase )
            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2] ):
                lowercase__ = scheduler.step_prk(__lowercase, __lowercase, __lowercase ).prev_sample
    # Calling step_plms before set_timesteps must raise.
    def A__ ( self : List[str] ):
        with self.assertRaises(__lowercase ):
            lowercase__ = self.scheduler_classes[0]
            lowercase__ = self.get_scheduler_config()
            lowercase__ = scheduler_class(**__lowercase )
            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample ).prev_sample
    # Regression values for the full loop under various configs.
    def A__ ( self : List[str] ):
        lowercase__ = self.full_loop()
        lowercase__ = torch.sum(torch.abs(__lowercase ) )
        lowercase__ = torch.mean(torch.abs(__lowercase ) )
        assert abs(result_sum.item() - 198.1318 ) < 1e-2
        assert abs(result_mean.item() - 0.2580 ) < 1e-3
    def A__ ( self : Any ):
        lowercase__ = self.full_loop(prediction_type="v_prediction" )
        lowercase__ = torch.sum(torch.abs(__lowercase ) )
        lowercase__ = torch.mean(torch.abs(__lowercase ) )
        assert abs(result_sum.item() - 67.3986 ) < 1e-2
        assert abs(result_mean.item() - 0.0878 ) < 1e-3
    def A__ ( self : Optional[int] ):
        # We specify different beta, so that the first alpha is 0.99
        lowercase__ = self.full_loop(set_alpha_to_one=__lowercase, beta_start=0.01 )
        lowercase__ = torch.sum(torch.abs(__lowercase ) )
        lowercase__ = torch.mean(torch.abs(__lowercase ) )
        assert abs(result_sum.item() - 230.0399 ) < 1e-2
        assert abs(result_mean.item() - 0.2995 ) < 1e-3
    def A__ ( self : List[str] ):
        # We specify different beta, so that the first alpha is 0.99
        lowercase__ = self.full_loop(set_alpha_to_one=__lowercase, beta_start=0.01 )
        lowercase__ = torch.sum(torch.abs(__lowercase ) )
        lowercase__ = torch.mean(torch.abs(__lowercase ) )
        assert abs(result_sum.item() - 186.9482 ) < 1e-2
        assert abs(result_mean.item() - 0.2434 ) < 1e-3
| 721
|
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
lowercase__ = 0
for ch in input_str:
lowercase__ = ord(SCREAMING_SNAKE_CASE_ )
lowercase__ = pow(2 , SCREAMING_SNAKE_CASE_ )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 37
| 0
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
lowercase_ = logging.get_logger(__name__)
# Map of released UniSpeechSat checkpoints to their hosted config files.
lowercase_ = {
    """microsoft/unispeech-sat-base-100h-libri-ft""": (
        """https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"""
    ),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class _snake_case ( lowercase__):
    """Configuration for UniSpeechSat models (model_type "unispeech-sat").

    Holds the conv feature-extractor stack sizes, transformer dimensions,
    SpecAugment masking, quantizer, CTC and XVector settings.

    NOTE(review): the ``__init__`` parameters all share one name
    ``__lowercase`` (a duplicate-argument SyntaxError as written) and the
    assignments bind a local ``lowercase__`` rather than the ``self.``
    attributes that the validation and ``inputs_to_logits_ratio`` property
    read (``self.conv_dim``, ``self.conv_stride``, ...) — reconstruct from
    the upstream source before use.
    """
    UpperCamelCase__ : Union[str, Any] ="""unispeech-sat"""
    def __init__( self : Union[str, Any], __lowercase : str=32, __lowercase : Any=768, __lowercase : Tuple=12, __lowercase : List[str]=12, __lowercase : int=3072, __lowercase : Optional[Any]="gelu", __lowercase : Tuple=0.1, __lowercase : List[Any]=0.1, __lowercase : Optional[int]=0.1, __lowercase : Optional[Any]=0.0, __lowercase : Optional[Any]=0.0, __lowercase : List[str]=0.1, __lowercase : str=0.1, __lowercase : Optional[int]=0.02, __lowercase : Optional[int]=1e-5, __lowercase : Any="group", __lowercase : Dict="gelu", __lowercase : List[Any]=(512, 512, 512, 512, 512, 512, 512), __lowercase : Dict=(5, 2, 2, 2, 2, 2, 2), __lowercase : Union[str, Any]=(10, 3, 3, 3, 3, 2, 2), __lowercase : List[Any]=False, __lowercase : Union[str, Any]=128, __lowercase : Optional[int]=16, __lowercase : Union[str, Any]=False, __lowercase : Optional[int]=True, __lowercase : List[Any]=0.05, __lowercase : Any=10, __lowercase : Tuple=2, __lowercase : Optional[Any]=0.0, __lowercase : str=10, __lowercase : List[Any]=0, __lowercase : Dict=320, __lowercase : str=2, __lowercase : Optional[int]=0.1, __lowercase : int=100, __lowercase : Any=256, __lowercase : str=256, __lowercase : Tuple=0.1, __lowercase : Dict="mean", __lowercase : Optional[Any]=False, __lowercase : Optional[Any]=False, __lowercase : Optional[Any]=256, __lowercase : Optional[int]=(512, 512, 512, 512, 1500), __lowercase : Optional[Any]=(5, 3, 3, 1, 1), __lowercase : Optional[int]=(1, 2, 3, 1, 1), __lowercase : Tuple=512, __lowercase : Tuple=0, __lowercase : str=1, __lowercase : Optional[Any]=2, __lowercase : List[str]=504, **__lowercase : Optional[int], ):
        super().__init__(**__lowercase, pad_token_id=__lowercase, bos_token_id=__lowercase, eos_token_id=__lowercase )
        # Transformer / feature-extractor dimensions.
        lowercase__ = hidden_size
        lowercase__ = feat_extract_norm
        lowercase__ = feat_extract_activation
        lowercase__ = list(__lowercase )
        lowercase__ = list(__lowercase )
        lowercase__ = list(__lowercase )
        lowercase__ = conv_bias
        lowercase__ = num_conv_pos_embeddings
        lowercase__ = num_conv_pos_embedding_groups
        lowercase__ = len(self.conv_dim )
        lowercase__ = num_hidden_layers
        lowercase__ = intermediate_size
        lowercase__ = hidden_act
        lowercase__ = num_attention_heads
        # Dropout / regularisation knobs.
        lowercase__ = hidden_dropout
        lowercase__ = attention_dropout
        lowercase__ = activation_dropout
        lowercase__ = feat_proj_dropout
        lowercase__ = final_dropout
        lowercase__ = layerdrop
        lowercase__ = layer_norm_eps
        lowercase__ = initializer_range
        lowercase__ = vocab_size
        lowercase__ = num_clusters
        lowercase__ = do_stable_layer_norm
        lowercase__ = use_weighted_layer_sum
        # The three conv lists must describe the same number of layers.
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        lowercase__ = apply_spec_augment
        lowercase__ = mask_time_prob
        lowercase__ = mask_time_length
        lowercase__ = mask_time_min_masks
        lowercase__ = mask_feature_prob
        lowercase__ = mask_feature_length
        lowercase__ = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        lowercase__ = num_codevectors_per_group
        lowercase__ = num_codevector_groups
        lowercase__ = contrastive_logits_temperature
        lowercase__ = feat_quantizer_dropout
        lowercase__ = num_negatives
        lowercase__ = codevector_dim
        lowercase__ = proj_codevector_dim
        lowercase__ = diversity_loss_weight
        # ctc loss
        lowercase__ = ctc_loss_reduction
        lowercase__ = ctc_zero_infinity
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        lowercase__ = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        lowercase__ = list(__lowercase )
        lowercase__ = list(__lowercase )
        lowercase__ = list(__lowercase )
        lowercase__ = xvector_output_dim
    # Total downsampling factor of the conv feature extractor
    # (product of the conv strides).
    @property
    def A__ ( self : List[str] ):
        return functools.reduce(operator.mul, self.conv_stride, 1 )
| 700
|
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
lowercase__ = len(SCREAMING_SNAKE_CASE_ )
for i in range(SCREAMING_SNAKE_CASE_ ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE_ ):
if numbers[j] < numbers[i]:
lowercase__ , lowercase__ = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
lowercase_ = input("""Enter numbers separated by a comma:\n""").strip()
lowercase_ = [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
| 37
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
class _snake_case ( lowercase__):
    """Configuration for a timm-backed backbone (model_type "timm_backbone").

    Note: the original ``__init__`` repeated the parameter name
    ``__lowercase`` six times (a duplicate-argument SyntaxError) and bound
    every value to a throwaway local instead of instance attributes; the
    parameter names and ``self.`` assignments are restored here following
    the upstream ``TimmBackboneConfig``.
    """
    UpperCamelCase__ : Any ="""timm_backbone"""
    def __init__( self : Any, backbone : str = None, num_channels : int = 3, features_only : bool = True, use_pretrained_backbone : bool = True, out_indices=None, **kwargs, ):
        super().__init__(**kwargs )
        # Name of the timm model to instantiate as the backbone.
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        # This config always routes through the timm backbone path.
        self.use_timm_backbone = True
        # Default to the last feature stage when no indices are given.
        self.out_indices = out_indices if out_indices is not None else (-1,)
| 701
|
def catalan_numbers(upper_limit: int) -> list:
    """Return the Catalan numbers C(0)..C(upper_limit) via dynamic programming.

    Recurrence: C(i) = sum(C(j) * C(i - j - 1) for j in 0..i-1).

    Raises:
        ValueError: if ``upper_limit`` is negative.
    """
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list


if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
| 37
| 0
|
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list, max_sum: int) -> list:
    """Return all subsets of ``nums`` (in DFS order) whose elements sum to ``max_sum``."""
    result: list = []
    path: list = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list,
    max_sum: int,
    num_index: int,
    path: list,
    result: list,
    remaining_nums_sum: int,
) -> None:
    """DFS over the subset state-space tree, appending matching paths to ``result``.

    Prunes a branch when the current sum already exceeds ``max_sum`` or when even
    adding every remaining element cannot reach it.
    """
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


if __name__ == "__main__":
    nums = [3, 34, 4, 12, 5, 2]
    max_sum = 9
    result = generate_sum_of_subsets_soln(nums, max_sum)
    print(*result)
| 702
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import structure: maps submodule name -> public symbols it exports.
# Framework-specific entries are only registered when that backend is installed.
_import_structure = {
    "configuration_roberta_prelayernorm": [
        "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "RobertaPreLayerNormConfig",
        "RobertaPreLayerNormOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
        "ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaPreLayerNormForCausalLM",
        "RobertaPreLayerNormForMaskedLM",
        "RobertaPreLayerNormForMultipleChoice",
        "RobertaPreLayerNormForQuestionAnswering",
        "RobertaPreLayerNormForSequenceClassification",
        "RobertaPreLayerNormForTokenClassification",
        "RobertaPreLayerNormModel",
        "RobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
        "TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaPreLayerNormForCausalLM",
        "TFRobertaPreLayerNormForMaskedLM",
        "TFRobertaPreLayerNormForMultipleChoice",
        "TFRobertaPreLayerNormForQuestionAnswering",
        "TFRobertaPreLayerNormForSequenceClassification",
        "TFRobertaPreLayerNormForTokenClassification",
        "TFRobertaPreLayerNormMainLayer",
        "TFRobertaPreLayerNormModel",
        "TFRobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
        "FlaxRobertaPreLayerNormForCausalLM",
        "FlaxRobertaPreLayerNormForMaskedLM",
        "FlaxRobertaPreLayerNormForMultipleChoice",
        "FlaxRobertaPreLayerNormForQuestionAnswering",
        "FlaxRobertaPreLayerNormForSequenceClassification",
        "FlaxRobertaPreLayerNormForTokenClassification",
        "FlaxRobertaPreLayerNormModel",
        "FlaxRobertaPreLayerNormPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Static type checkers see real imports; at runtime the lazy module below
    # defers importing heavy backends until first attribute access.
    from .configuration_roberta_prelayernorm import (
        ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
        RobertaPreLayerNormConfig,
        RobertaPreLayerNormOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta_prelayernorm import (
            ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaPreLayerNormForCausalLM,
            RobertaPreLayerNormForMaskedLM,
            RobertaPreLayerNormForMultipleChoice,
            RobertaPreLayerNormForQuestionAnswering,
            RobertaPreLayerNormForSequenceClassification,
            RobertaPreLayerNormForTokenClassification,
            RobertaPreLayerNormModel,
            RobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta_prelayernorm import (
            TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaPreLayerNormForCausalLM,
            TFRobertaPreLayerNormForMaskedLM,
            TFRobertaPreLayerNormForMultipleChoice,
            TFRobertaPreLayerNormForQuestionAnswering,
            TFRobertaPreLayerNormForSequenceClassification,
            TFRobertaPreLayerNormForTokenClassification,
            TFRobertaPreLayerNormMainLayer,
            TFRobertaPreLayerNormModel,
            TFRobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta_prelayernorm import (
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormPreTrainedModel,
        )

else:
    import sys

    # Install the lazy proxy in place of this module (the original bound it to a
    # throwaway name, leaving `import sys` unused and the laziness inert).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 37
| 0
|
from math import sqrt
def solution(limit: int = 1000000) -> int:
    """Project Euler 86: smallest cuboid size M such that the number of cuboids
    with integer-length shortest surface paths and sides <= M exceeds ``limit``.

    For a cuboid a <= b <= c = M, the shortest path is integral when
    sqrt((a + b)^2 + M^2) is an integer; for each valid a+b the count of (a, b)
    pairs is added in one step.
    """
    num_cuboids = 0
    max_cuboid_size = 0
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                # Number of (a, b) splits of sum_shortest_sides with a <= b <= max_cuboid_size.
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(f"{solution() = }")
| 703
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model, ckpt_dir: str, model_name: str):
    """Export a PyTorch BERT ``model`` as a TF1 checkpoint under ``ckpt_dir``.

    Args:
        model: a loaded ``BertModel`` whose state dict is converted.
        ckpt_dir: output directory (created if missing).
        model_name: used to name the ``.ckpt`` file (dashes become underscores).
    """
    # Weights that are stored transposed relative to TF's kernel layout.
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    # Ordered (pattern, replacement) pairs mapping PyTorch names to TF names.
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str) -> str:
        # Apply every rename rule in order, then prefix with the "bert/" scope.
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor, name, session):
        # Create a zero-initialized TF variable matching the tensor's dtype/shape.
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    """CLI entry point: load a PyTorch BERT checkpoint and convert it to TF."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
| 37
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger; the original bound this to a name that was immediately
# clobbered by the next assignment.
logger = logging.get_logger(__name__)
# Pretrained config URLs, keyed by checkpoint name (transformers convention).
FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class _snake_case(PretrainedConfig):
    """Falcon model configuration.

    Stores architecture hyper-parameters on the instance; the original version
    wrote every value into a dead local and never set the attributes that the
    ``head_dim``/``rotary`` properties read.
    """

    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        # Per-head hidden dimension.
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        # Rotary position embeddings are used exactly when ALiBi is not.
        return not self.alibi
| 704
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: submodule name -> exported public symbols.
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code is only exposed when torch is installed.
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Real imports for static analysis only.
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    # Install the lazy proxy in place of this module so torch is only imported
    # on first attribute access (original discarded the proxy into a dead name).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 37
| 0
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module-level logger; the original bound this to a name that the following
# constant assignments immediately clobbered.
logger = logging.get_logger(__name__)
# File names the slow tokenizer expects inside a checkpoint directory.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# Download URLs per pretrained checkpoint. These three constants are referenced
# by the tokenizer class below; the original bound them all to one dead name.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
    },
    "merges_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
    },
}

# Maximum sequence length supported by each checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt2": 1024,
    "gpt2-medium": 1024,
    "gpt2-large": 1024,
    "gpt2-xl": 1024,
    "distilgpt2": 1024,
}
class _snake_case(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) GPT-2 BPE tokenizer.

    Restores the real method names — the original named every override ``A__``,
    so only the last definition survived and none of the base-class hooks
    (`_batch_encode_plus`, `_encode_plus`, `save_vocabulary`,
    `_build_conversation_input_ids`) were actually overridden.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPTaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        self.add_bos_token = kwargs.pop("add_bos_token", False)

        # Rebuild the backend pre-tokenizer if its add_prefix_space setting
        # disagrees with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the tokenizer model files to ``save_directory`` and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a Conversation into ids, appending EOS after each turn and
        keeping only the trailing ``model_max_length`` tokens."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 705
|
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()

# Maps a model-type key to its (config class, TF model class(es), PyTorch model
# class(es), pretrained archive map(s)). Both conversion helpers below look
# model types up here; the original bound the table to a dead name, leaving
# MODEL_CLASSES undefined.
MODEL_CLASSES = {
    "bart": (
        BartConfig,
        TFBartForConditionalGeneration,
        TFBartForSequenceClassification,
        BartForConditionalGeneration,
        BART_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "bert": (
        BertConfig,
        TFBertForPreTraining,
        BertForPreTraining,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-large-uncased-whole-word-masking-finetuned-squad": (
        BertConfig,
        TFBertForQuestionAnswering,
        BertForQuestionAnswering,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-large-cased-whole-word-masking-finetuned-squad": (
        BertConfig,
        TFBertForQuestionAnswering,
        BertForQuestionAnswering,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-base-cased-finetuned-mrpc": (
        BertConfig,
        TFBertForSequenceClassification,
        BertForSequenceClassification,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "dpr": (
        DPRConfig,
        TFDPRQuestionEncoder,
        TFDPRContextEncoder,
        TFDPRReader,
        DPRQuestionEncoder,
        DPRContextEncoder,
        DPRReader,
        DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "gpt2": (
        GPTaConfig,
        TFGPTaLMHeadModel,
        GPTaLMHeadModel,
        GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlnet": (
        XLNetConfig,
        TFXLNetLMHeadModel,
        XLNetLMHeadModel,
        XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlm": (
        XLMConfig,
        TFXLMWithLMHeadModel,
        XLMWithLMHeadModel,
        XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlm-roberta": (
        XLMRobertaConfig,
        TFXLMRobertaForMaskedLM,
        XLMRobertaForMaskedLM,
        XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "transfo-xl": (
        TransfoXLConfig,
        TFTransfoXLLMHeadModel,
        TransfoXLLMHeadModel,
        TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "openai-gpt": (
        OpenAIGPTConfig,
        TFOpenAIGPTLMHeadModel,
        OpenAIGPTLMHeadModel,
        OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "roberta": (
        RobertaConfig,
        TFRobertaForCausalLM,
        TFRobertaForMaskedLM,
        RobertaForMaskedLM,
        ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "layoutlm": (
        LayoutLMConfig,
        TFLayoutLMForMaskedLM,
        LayoutLMForMaskedLM,
        LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "roberta-large-mnli": (
        RobertaConfig,
        TFRobertaForSequenceClassification,
        RobertaForSequenceClassification,
        ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "camembert": (
        CamembertConfig,
        TFCamembertForMaskedLM,
        CamembertForMaskedLM,
        CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "flaubert": (
        FlaubertConfig,
        TFFlaubertWithLMHeadModel,
        FlaubertWithLMHeadModel,
        FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "distilbert": (
        DistilBertConfig,
        TFDistilBertForMaskedLM,
        DistilBertForMaskedLM,
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "distilbert-base-distilled-squad": (
        DistilBertConfig,
        TFDistilBertForQuestionAnswering,
        DistilBertForQuestionAnswering,
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "lxmert": (
        LxmertConfig,
        TFLxmertForPreTraining,
        LxmertForPreTraining,
        LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "lxmert-visual-feature-encoder": (
        LxmertConfig,
        TFLxmertVisualFeatureEncoder,
        LxmertVisualFeatureEncoder,
        LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "ctrl": (
        CTRLConfig,
        TFCTRLLMHeadModel,
        CTRLLMHeadModel,
        CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "albert": (
        AlbertConfig,
        TFAlbertForPreTraining,
        AlbertForPreTraining,
        ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "t5": (
        TaConfig,
        TFTaForConditionalGeneration,
        TaForConditionalGeneration,
        T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "electra": (
        ElectraConfig,
        TFElectraForPreTraining,
        ElectraForPreTraining,
        ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "wav2vec2": (
        WavaVecaConfig,
        TFWavaVecaModel,
        WavaVecaModel,
        WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    """Convert a single PyTorch checkpoint to a TF 2.0 ``.h5`` weight file.

    Raises:
        ValueError: if ``model_type`` is not a key of MODEL_CLASSES.
    """
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    # NOTE(review): some MODEL_CLASSES entries hold more than four items; this
    # 4-way unpack only works for the standard entries — confirm for dpr/bart.
    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    """Convert a batch of PyTorch checkpoints (or all known ones) to TF 2.0."""
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            # Finetuned checkpoints are only converted when explicitly requested.
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f" Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f" Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f" Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        help=(
            f"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and "
            "convert all the models from AWS."
        ),
    )
    parser.add_argument(
        "--pytorch_checkpoint_path",
        default=None,
        type=str,
        help=(
            "Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
            "If not given, will download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        help=(
            "The config json file corresponding to the pre-trained model. \n"
            "This specifies the model architecture. If not given and "
            "--pytorch_checkpoint_path is not given or is a shortcut name "
            "use the configuration associated to the shortcut name on the AWS"
        ),
    )
    parser.add_argument(
        "--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
    )
    parser.add_argument(
        "--use_cached_models",
        action="store_true",
        help="Use cached models if possible instead of updating to latest checkpoint versions.",
    )
    parser.add_argument(
        "--remove_cached_files",
        action="store_true",
        help="Remove pytorch models after conversion (save memory when converting in batches).",
    )
    parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
    args = parser.parse_args()

    # A single --pytorch_checkpoint_path converts one checkpoint; otherwise all
    # known checkpoints for the (optional) model type are converted.
    convert_all_pt_checkpoints_to_tf(
        args.model_type.lower() if args.model_type is not None else None,
        args.tf_dump_path,
        model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
        if args.pytorch_checkpoint_path is not None
        else None,
        config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
        compare_with_pt_model=args.compare_with_pt_model,
        use_cached_models=args.use_cached_models,
        remove_cached_files=args.remove_cached_files,
        only_convert_finetuned_models=args.only_convert_finetuned_models,
    )
| 37
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
# Canonical filenames the tokenizer looks for inside a checkpoint directory.
# NOTE(review): the class below reads these tables through the names
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP / PRETRAINED_INIT_CONFIGURATION /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES — all four assignments here were
# collapsed to `lowercase_` by obfuscation; verify and restore the bindings.
lowercase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
# Download URLs for the vocab/tokenizer files of each published ELECTRA checkpoint.
lowercase_ = {
    """vocab_file""": {
        """google/electra-small-generator""": (
            """https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"""
        ),
        """google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""",
        """google/electra-large-generator""": (
            """https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"""
        ),
        """google/electra-small-discriminator""": (
            """https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"""
        ),
        """google/electra-base-discriminator""": (
            """https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"""
        ),
        """google/electra-large-discriminator""": (
            """https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"""
        ),
    },
    """tokenizer_file""": {
        """google/electra-small-generator""": (
            """https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"""
        ),
        """google/electra-base-generator""": (
            """https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"""
        ),
        """google/electra-large-generator""": (
            """https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"""
        ),
        """google/electra-small-discriminator""": (
            """https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"""
        ),
        """google/electra-base-discriminator""": (
            """https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"""
        ),
        """google/electra-large-discriminator""": (
            """https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"""
        ),
    },
}
# Maximum input lengths (positional-embedding sizes) per checkpoint.
lowercase_ = {
    """google/electra-small-generator""": 512,
    """google/electra-base-generator""": 512,
    """google/electra-large-generator""": 512,
    """google/electra-small-discriminator""": 512,
    """google/electra-base-discriminator""": 512,
    """google/electra-large-discriminator""": 512,
}
# Per-checkpoint tokenizer init defaults (all ELECTRA checkpoints are lowercased).
lowercase_ = {
    """google/electra-small-generator""": {"""do_lower_case""": True},
    """google/electra-base-generator""": {"""do_lower_case""": True},
    """google/electra-large-generator""": {"""do_lower_case""": True},
    """google/electra-small-discriminator""": {"""do_lower_case""": True},
    """google/electra-base-discriminator""": {"""do_lower_case""": True},
    """google/electra-large-discriminator""": {"""do_lower_case""": True},
}
class _snake_case(PreTrainedTokenizerFast):
    """Fast ELECTRA tokenizer backed by the HuggingFace *tokenizers* library
    (WordPiece; end-to-end identical to the fast BERT tokenizer).

    Fixes applied to the obfuscated original:
    - base class restored to the imported ``PreTrainedTokenizerFast``
      (``lowercase__`` was undefined at module level);
    - the five class attributes were all named ``UpperCamelCase__`` (only the
      last would survive) — restored to the names the base class reads;
    - the three methods were all named ``A__`` (later defs shadow earlier) and
      had duplicate ``__lowercase`` parameters (a SyntaxError) — restored to
      the standard tokenizer API names and distinct parameters.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # If the serialized normalizer disagrees with the requested options,
        # rebuild it with the values passed to this constructor.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Build model inputs: ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Return token-type ids: 0 for ``[CLS] A [SEP]``, 1 for ``B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the WordPiece vocabulary to ``save_directory``; return the written paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 706
|
import math
def res(x: int, y: int) -> float:
    """Return a comparable magnitude for ``x ** y``.

    Uses log10(x^y) = y * log10(x) so huge powers can be compared without
    computing them. Special cases: x == 0 -> 0 (0^y == 0), y == 0 -> 1
    (x^0 == 1).

    Fixes applied to the obfuscated original: both parameters were named
    ``SCREAMING_SNAKE_CASE_`` (a SyntaxError) while the body read unbound
    ``x``/``y``; ``math.logaa`` does not exist and is restored to
    ``math.log10``; the function is renamed ``res`` to match its call sites.
    """
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y * log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")
if __name__ == "__main__":  # Main function
    # Read two (base, power) pairs and report which power is larger.
    # Fixes applied to the obfuscated original: both tuple targets were
    # `lowercase_` (so x/y were never bound), the comparisons read undefined
    # `xa`/`ya`/`resa`, and `resa > resa` was vacuously False.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)
    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
| 37
| 0
|
def solution(limit: int = 100_0000) -> int:
    """Project Euler 135: count n < `limit`+1 with exactly ten solutions of
    x^2 - y^2 - z^2 = n where x, y, z are positive integers in arithmetic
    progression (x = a + d, y = a, z = a - d, giving n = a * (4d - a)).

    Fixes applied to the obfuscated original: the parameter was named
    ``SCREAMING_SNAKE_CASE_`` while the body read unbound ``limit``, the inner
    ``range`` arguments were collapsed, and the function is renamed
    ``solution`` to match the ``__main__`` call site.
    """
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        # first_term plays the role of a; n runs over its multiples.
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisble by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x,y,z are positive integers
                    frequency[n] += 1  # so z>0 and a>d ,also 4d<a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
    # `f'{expr = }'` (Python 3.8+) prints the expression text and its value.
    print(F'{solution() = }')
| 707
|
import pickle
import numpy as np
from matplotlib import pyplot as plt
class _snake_case :
    """Small convolutional neural network built on numpy only: one convolution
    layer, one pooling layer, and a two-hidden-layer back-propagation (BP)
    classifier trained by plain gradient descent, with pickle save/load.

    NOTE(review): this block is machine-obfuscated. Multi-parameter method
    signatures repeat the name ``__lowercase`` (duplicate argument names are a
    SyntaxError in Python), all methods share the name ``A__`` (later defs
    shadow earlier ones), and most assignment targets were collapsed to
    ``lowercase__`` so names read later (``bp_numa``, ``conva_get``,
    ``size_pa``, ...) are unbound. The comments below describe the intended
    behavior of the pre-obfuscation implementation; restore distinct names
    before running.
    """
    def __init__( self : Tuple, __lowercase : Union[str, Any], __lowercase : int, __lowercase : Union[str, Any], __lowercase : str, __lowercase : List[Any], __lowercase : List[str]=0.2, __lowercase : List[str]=0.2 ):
        # Intended parameters: conv1_get=[kernel_size, kernel_count, step],
        # size_p1 (pooling window), bp_num1/2/3 (BP layer widths),
        # rate_w / rate_t (weight and threshold learning rates).
        lowercase__ = bp_numa
        lowercase__ = bp_numa
        lowercase__ = bp_numa
        lowercase__ = conva_get[:2]
        lowercase__ = conva_get[2]
        lowercase__ = size_pa
        lowercase__ = rate_w
        lowercase__ = rate_t
        # Convolution kernels initialised uniformly in (-0.5, 0.5).
        lowercase__ = [
            np.mat(-1 * np.random.rand(self.conva[0], self.conva[0] ) + 0.5 )
            for i in range(self.conva[1] )
        ]
        # Fully-connected weight matrices, same (-0.5, 0.5) init.
        lowercase__ = np.mat(-1 * np.random.rand(self.num_bpa, self.num_bpa ) + 0.5 )
        lowercase__ = np.mat(-1 * np.random.rand(self.num_bpa, self.num_bpa ) + 0.5 )
        # Thresholds (biases) initialised uniformly in (-1, 1).
        lowercase__ = -2 * np.random.rand(self.conva[1] ) + 1
        lowercase__ = -2 * np.random.rand(self.num_bpa ) + 1
        lowercase__ = -2 * np.random.rand(self.num_bpa ) + 1
    def A__ ( self : Any, __lowercase : List[str] ):
        # save model dict with pickle
        lowercase__ = {
            "num_bp1": self.num_bpa,
            "num_bp2": self.num_bpa,
            "num_bp3": self.num_bpa,
            "conv1": self.conva,
            "step_conv1": self.step_conva,
            "size_pooling1": self.size_poolinga,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conva,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conva,
            "thre_bp2": self.thre_bpa,
            "thre_bp3": self.thre_bpa,
        }
        with open(__lowercase, "wb" ) as f:
            pickle.dump(__lowercase, __lowercase )
        print(F'''Model saved: {save_path}''' )
    @classmethod
    def A__ ( cls : Dict, __lowercase : Union[str, Any] ):
        # read saved model
        # NOTE(review): pickle.load on an untrusted file is unsafe (S301 is
        # deliberately suppressed here) — only load files you produced.
        with open(__lowercase, "rb" ) as f:
            lowercase__ = pickle.load(__lowercase )  # noqa: S301
        lowercase__ = model_dic.get("conv1" )
        conv_get.append(model_dic.get("step_conv1" ) )
        lowercase__ = model_dic.get("size_pooling1" )
        lowercase__ = model_dic.get("num_bp1" )
        lowercase__ = model_dic.get("num_bp2" )
        lowercase__ = model_dic.get("num_bp3" )
        lowercase__ = model_dic.get("rate_weight" )
        lowercase__ = model_dic.get("rate_thre" )
        # create model instance
        lowercase__ = CNN(__lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase )
        # modify model parameter
        lowercase__ = model_dic.get("w_conv1" )
        lowercase__ = model_dic.get("wkj" )
        lowercase__ = model_dic.get("vji" )
        lowercase__ = model_dic.get("thre_conv1" )
        lowercase__ = model_dic.get("thre_bp2" )
        lowercase__ = model_dic.get("thre_bp3" )
        return conv_ins
    def A__ ( self : str, __lowercase : List[Any] ):
        # Logistic sigmoid activation.
        return 1 / (1 + np.exp(-1 * x ))
    def A__ ( self : List[str], __lowercase : Optional[Any] ):
        # Round to 3 decimals (used when formatting predictions).
        return round(__lowercase, 3 )
    def A__ ( self : Optional[Any], __lowercase : Dict, __lowercase : Optional[int], __lowercase : Optional[int], __lowercase : Optional[Any], __lowercase : str ):
        # convolution process
        lowercase__ = convs[0]
        lowercase__ = convs[1]
        lowercase__ = np.shape(__lowercase )[0]
        # get the data slice of original image data, data_focus
        lowercase__ = []
        for i_focus in range(0, size_data - size_conv + 1, __lowercase ):
            for j_focus in range(0, size_data - size_conv + 1, __lowercase ):
                lowercase__ = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(__lowercase )
        # calculate the feature map of every single kernel, and saved as list of matrix
        lowercase__ = []
        lowercase__ = int((size_data - size_conv) / conv_step + 1 )
        for i_map in range(__lowercase ):
            lowercase__ = []
            for i_focus in range(len(__lowercase ) ):
                # sigmoid(<patch, kernel> - threshold) per sliding window
                lowercase__ = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map] ) )
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(__lowercase ) )
            lowercase__ = np.asmatrix(__lowercase ).reshape(
                __lowercase, __lowercase )
            data_featuremap.append(__lowercase )
        # expanding the data slice to One dimenssion
        lowercase__ = []
        for each_focus in data_focus:
            focusa_list.extend(self.Expand_Mat(__lowercase ) )
        lowercase__ = np.asarray(__lowercase )
        return focus_list, data_featuremap
    def A__ ( self : List[Any], __lowercase : Any, __lowercase : List[Any], __lowercase : Union[str, Any]="average_pool" ):
        # pooling process
        lowercase__ = len(featuremaps[0] )
        lowercase__ = int(size_map / size_pooling )
        lowercase__ = []
        for i_map in range(len(__lowercase ) ):
            lowercase__ = featuremaps[i_map]
            lowercase__ = []
            for i_focus in range(0, __lowercase, __lowercase ):
                for j_focus in range(0, __lowercase, __lowercase ):
                    lowercase__ = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(__lowercase ) )
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(__lowercase ) )
            lowercase__ = np.asmatrix(__lowercase ).reshape(__lowercase, __lowercase )
            featuremap_pooled.append(__lowercase )
        return featuremap_pooled
    def A__ ( self : str, __lowercase : Optional[Any] ):
        # expanding three dimension data to one dimension list
        lowercase__ = []
        for i in range(len(__lowercase ) ):
            lowercase__ = np.shape(data[i] )
            lowercase__ = data[i].reshape(1, shapes[0] * shapes[1] )
            lowercase__ = data_listed.getA().tolist()[0]
            data_expanded.extend(__lowercase )
        lowercase__ = np.asarray(__lowercase )
        return data_expanded
    def A__ ( self : Optional[int], __lowercase : Optional[int] ):
        # expanding matrix to one dimension list
        lowercase__ = np.asarray(__lowercase )
        lowercase__ = np.shape(__lowercase )
        lowercase__ = data_mat.reshape(1, shapes[0] * shapes[1] )
        return data_expanded
    def A__ ( self : str, __lowercase : Tuple, __lowercase : List[Any], __lowercase : Any, __lowercase : Union[str, Any], __lowercase : Tuple ):
        # Back-propagate the pooled gradient onto each (un-pooled) feature map:
        # each pooled gradient value is broadcast over its pooling window, then
        # multiplied by the sigmoid derivative out*(1-out).
        lowercase__ = []
        lowercase__ = 0
        for i_map in range(__lowercase ):
            lowercase__ = np.ones((size_map, size_map) )
            for i in range(0, __lowercase, __lowercase ):
                for j in range(0, __lowercase, __lowercase ):
                    lowercase__ = pd_pool[
                        i_pool
                    ]
                    lowercase__ = i_pool + 1
            lowercase__ = np.multiply(
                __lowercase, np.multiply(out_map[i_map], (1 - out_map[i_map]) ) )
            pd_all.append(__lowercase )
        return pd_all
    def A__ ( self : Tuple, __lowercase : int, __lowercase : Optional[Any], __lowercase : List[Any], __lowercase : Optional[Any], __lowercase : List[Any], __lowercase : List[str]=bool ):
        # model traning
        # Intended parameters: datas_train, datas_teach, n_repeat,
        # error_accuracy, draw_e (plot the MSE curve when truthy).
        print("----------------------Start Training-------------------------" )
        print((" - - Shape: Train_Data ", np.shape(__lowercase )) )
        print((" - - Shape: Teach_Data ", np.shape(__lowercase )) )
        lowercase__ = 0
        lowercase__ = []
        lowercase__ = 1_0000  # initial MSE sentinel so the loop starts
        while rp < n_repeat and mse >= error_accuracy:
            lowercase__ = 0
            print(F'''-------------Learning Time {rp}--------------''' )
            for p in range(len(__lowercase ) ):
                # print('------------Learning Image: %d--------------'%p)
                lowercase__ = np.asmatrix(datas_train[p] )
                lowercase__ = np.asarray(datas_teach[p] )
                # Forward pass: conv -> pool -> flatten -> BP layers 2 and 3.
                lowercase__ , lowercase__ = self.convolute(
                    __lowercase, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, )
                lowercase__ = self.pooling(__lowercase, self.size_poolinga )
                lowercase__ = np.shape(__lowercase )
                lowercase__ = self._expand(__lowercase )
                lowercase__ = data_bp_input
                lowercase__ = np.dot(__lowercase, self.vji.T ) - self.thre_bpa
                lowercase__ = self.sig(__lowercase )
                lowercase__ = np.dot(__lowercase, self.wkj.T ) - self.thre_bpa
                lowercase__ = self.sig(__lowercase )
                # --------------Model Leaning ------------------------
                # calculate error and gradient---------------
                lowercase__ = np.multiply(
                    (data_teach - bp_outa), np.multiply(__lowercase, (1 - bp_outa) ) )
                lowercase__ = np.multiply(
                    np.dot(__lowercase, self.wkj ), np.multiply(__lowercase, (1 - bp_outa) ) )
                lowercase__ = np.dot(__lowercase, self.vji )
                lowercase__ = pd_i_all / (self.size_poolinga * self.size_poolinga)
                lowercase__ = pd_conva_pooled.T.getA().tolist()
                lowercase__ = self._calculate_gradient_from_pool(
                    __lowercase, __lowercase, shape_featuremapa[0], shape_featuremapa[1], self.size_poolinga, )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conva[1] ):
                    lowercase__ = self._expand_mat(pd_conva_all[k_conv] )
                    lowercase__ = self.rate_weight * np.dot(__lowercase, __lowercase )
                    lowercase__ = self.w_conva[k_conv] + delta_w.reshape(
                        (self.conva[0], self.conva[0]) )
                    lowercase__ = (
                        self.thre_conva[k_conv]
                        - np.sum(pd_conva_all[k_conv] ) * self.rate_thre
                    )
                # all connected layer
                lowercase__ = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
                lowercase__ = self.vji + pd_j_all.T * bp_outa * self.rate_weight
                lowercase__ = self.thre_bpa - pd_k_all * self.rate_thre
                lowercase__ = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                lowercase__ = np.sum(abs(data_teach - bp_outa ) )
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            lowercase__ = rp + 1
            lowercase__ = error_count / patterns
            all_mse.append(__lowercase )
        def draw_error():
            # Plot per-epoch MSE against the target accuracy line.
            lowercase__ = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
            plt.plot(__lowercase, "+-" )
            plt.plot(__lowercase, "r--" )
            plt.xlabel("Learning Times" )
            plt.ylabel("All_mse" )
            plt.grid(__lowercase, alpha=0.5 )
            plt.show()
        print("------------------Training Complished---------------------" )
        print((" - - Training epoch: ", rp, F''' - - Mse: {mse:.6f}''') )
        if draw_e:
            draw_error()
        return mse
    def A__ ( self : List[str], __lowercase : Optional[int] ):
        # model predict
        lowercase__ = []
        print("-------------------Start Testing-------------------------" )
        print((" - - Shape: Test_Data ", np.shape(__lowercase )) )
        for p in range(len(__lowercase ) ):
            # Same forward pass as training, without weight updates.
            lowercase__ = np.asmatrix(datas_test[p] )
            lowercase__ , lowercase__ = self.convolute(
                __lowercase, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, )
            lowercase__ = self.pooling(__lowercase, self.size_poolinga )
            lowercase__ = self._expand(__lowercase )
            lowercase__ = data_bp_input
            lowercase__ = bp_outa * self.vji.T - self.thre_bpa
            lowercase__ = self.sig(__lowercase )
            lowercase__ = bp_outa * self.wkj.T - self.thre_bpa
            lowercase__ = self.sig(__lowercase )
            produce_out.extend(bp_outa.getA().tolist() )
        lowercase__ = [list(map(self.do_round, __lowercase ) ) for each in produce_out]
        return np.asarray(__lowercase )
    def A__ ( self : int, __lowercase : Any ):
        # return the data of image after convoluting process so we can check it out
        lowercase__ = np.asmatrix(__lowercase )
        lowercase__ , lowercase__ = self.convolute(
            __lowercase, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, )
        lowercase__ = self.pooling(__lowercase, self.size_poolinga )
        return data_conveda, data_pooleda
if __name__ == "__main__":
    # Library-style module: nothing is run on direct execution.
    pass
| 37
| 0
|
from ..utils import DummyObject, requires_backends
class _snake_case(metaclass=DummyObject):
    """Placeholder emitted when the `speech` backend is not installed; any
    instantiation raises a helpful ImportError via `requires_backends`.

    Fix applied: the metaclass referenced undefined ``lowercase__`` — restored
    to the ``DummyObject`` imported at the top of this module — and the backend
    list attribute is named ``_backends`` as that metaclass expects.
    """

    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
class _snake_case(metaclass=DummyObject):
    """Second `speech`-backend placeholder class; instantiation raises an
    ImportError through `requires_backends`.

    Fix applied: ``metaclass=lowercase__`` referenced an undefined name —
    restored to the imported ``DummyObject`` — and the backend list attribute
    is named ``_backends`` as that metaclass expects.
    """

    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
| 708
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def get_config(model_name):
    """Build a `BitConfig` for `model_name`, with the ImageNet-1k label maps
    downloaded from the HuggingFace Hub.

    Fixes applied to the obfuscated original: the body read unbound
    ``model_name``/``idalabel``; the dict-comprehension key was collapsed; the
    config kwargs are restored to the standard ``id2label``/``label2id``; the
    function is renamed ``get_config`` to match its call site below.
    """
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    # BiT checkpoints use weight-standardized convolutions.
    conv_layer = "std_conv" if "bit" in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def rename_key(name):
    """Map a timm BiT state-dict key to the transformers naming scheme.

    Fix applied to the obfuscated original: the parameter was named
    ``SCREAMING_SNAKE_CASE_`` while the body read unbound ``name``; the
    function is renamed ``rename_key`` for use by the conversion loop.
    """
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    # Everything that is not part of the embedder/classifier lives under the encoder.
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
def prepare_img():
    """Download the standard COCO test image (two cats on a couch) used to
    sanity-check vision-model conversions.

    Fix applied to the obfuscated original: ``requests.get`` was called with
    unbound ``SCREAMING_SNAKE_CASE_`` arguments — restored to the local URL
    and ``stream=True``; renamed ``prepare_img`` to match its call site.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy a timm BiT checkpoint into the transformers `BitForImageClassification`
    layout, verify preprocessing and logits against timm, then optionally save
    and/or push to the Hub.

    Fixes applied to the obfuscated original: duplicate ``SCREAMING_SNAKE_CASE_``
    parameters (a SyntaxError) and unbound body names are restored; the
    state-dict loop lost the ``rename_key(key)`` re-keying and is restored;
    ``idalabel`` is restored to the standard ``id2label``; the function is
    renamed ``convert_bit_checkpoint`` to match the ``__main__`` call site.
    """
    config = get_config(model_name)
    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, re-keyed to the HF naming scheme
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        # timm stores the classifier weights with extra singleton dims.
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val
    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # create image processor mirroring timm's eval transform
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits
    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    # NOTE(review): the parser is bound to `lowercase_` but used as `parser`,
    # and the namespace is bound to `lowercase_` but used as `args` —
    # presumably `parser = argparse.ArgumentParser()` / `args = parser.parse_args()`
    # before obfuscation; as written these lines raise NameError.
    lowercase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""resnetv2_50x1_bitm""",
        type=str,
        help="""Name of the BiT timm model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""",
        action="""store_true""",
        help="""Whether to push the model to the hub.""",
    )
    lowercase_ = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 37
| 0
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class _snake_case :
    """Helper that builds small random LayoutLM configs/inputs and runs shape
    checks for each TF model head (base, MLM, sequence/token classification,
    QA).

    NOTE(review): machine-obfuscated — the multi-parameter signatures repeat
    ``__lowercase`` (duplicate argument names are a SyntaxError), all methods
    are named ``A__`` (later defs shadow earlier ones), and ``__init__``'s
    body reads parameter names (``parent``, ``batch_size``, ...) that are no
    longer bound. Restore distinct names before running.
    """
    def __init__( self : Optional[int], __lowercase : Any, __lowercase : Optional[int]=13, __lowercase : Tuple=7, __lowercase : List[Any]=True, __lowercase : int=True, __lowercase : Dict=True, __lowercase : List[Any]=True, __lowercase : Optional[int]=99, __lowercase : Tuple=32, __lowercase : Dict=2, __lowercase : Any=4, __lowercase : Dict=37, __lowercase : List[Any]="gelu", __lowercase : str=0.1, __lowercase : Optional[int]=0.1, __lowercase : int=512, __lowercase : Any=16, __lowercase : List[str]=2, __lowercase : Union[str, Any]=0.02, __lowercase : str=3, __lowercase : str=4, __lowercase : Tuple=None, __lowercase : List[str]=1000, ):
        # Store all hyper-parameters of the toy model under test.
        lowercase__ = parent
        lowercase__ = batch_size
        lowercase__ = seq_length
        lowercase__ = is_training
        lowercase__ = use_input_mask
        lowercase__ = use_token_type_ids
        lowercase__ = use_labels
        lowercase__ = vocab_size
        lowercase__ = hidden_size
        lowercase__ = num_hidden_layers
        lowercase__ = num_attention_heads
        lowercase__ = intermediate_size
        lowercase__ = hidden_act
        lowercase__ = hidden_dropout_prob
        lowercase__ = attention_probs_dropout_prob
        lowercase__ = max_position_embeddings
        lowercase__ = type_vocab_size
        lowercase__ = type_sequence_label_size
        lowercase__ = initializer_range
        lowercase__ = num_labels
        lowercase__ = num_choices
        lowercase__ = scope
        lowercase__ = range_bbox  # max coordinate value for random bounding boxes
    def A__ ( self : Dict ):
        # Build a config plus random ids / bboxes / masks / labels.
        lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        # convert bbox to numpy since TF does not support item assignment
        lowercase__ = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox ).numpy()
        # Ensure that bbox is legal (x0 <= x1 and y0 <= y1): swap if inverted.
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    lowercase__ = bbox[i, j, 3]
                    lowercase__ = bbox[i, j, 1]
                    lowercase__ = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    lowercase__ = bbox[i, j, 2]
                    lowercase__ = bbox[i, j, 0]
                    lowercase__ = t
        lowercase__ = tf.convert_to_tensor(__lowercase )
        lowercase__ = None
        if self.use_input_mask:
            lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
        lowercase__ = None
        if self.use_token_type_ids:
            lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        lowercase__ = None
        lowercase__ = None
        lowercase__ = None
        if self.use_labels:
            lowercase__ = ids_tensor([self.batch_size], self.type_sequence_label_size )
            lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            lowercase__ = ids_tensor([self.batch_size], self.num_choices )
        lowercase__ = LayoutLMConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def A__ ( self : List[Any], __lowercase : List[Any], __lowercase : Any, __lowercase : Optional[int], __lowercase : Optional[int], __lowercase : Union[str, Any], __lowercase : Optional[Any], __lowercase : List[Any], __lowercase : List[str] ):
        # Base model: check hidden-state and pooled-output shapes across the
        # three supported calling conventions.
        lowercase__ = TFLayoutLMModel(config=__lowercase )
        lowercase__ = model(__lowercase, __lowercase, attention_mask=__lowercase, token_type_ids=__lowercase )
        lowercase__ = model(__lowercase, __lowercase, token_type_ids=__lowercase )
        lowercase__ = model(__lowercase, __lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
    def A__ ( self : List[Any], __lowercase : Union[str, Any], __lowercase : int, __lowercase : Union[str, Any], __lowercase : Tuple, __lowercase : Optional[Any], __lowercase : List[Any], __lowercase : str, __lowercase : List[Any] ):
        # Masked-LM head: logits over the vocabulary per token.
        lowercase__ = TFLayoutLMForMaskedLM(config=__lowercase )
        lowercase__ = model(__lowercase, __lowercase, attention_mask=__lowercase, token_type_ids=__lowercase, labels=__lowercase )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
    def A__ ( self : str, __lowercase : int, __lowercase : int, __lowercase : Optional[Any], __lowercase : int, __lowercase : List[str], __lowercase : List[Any], __lowercase : int, __lowercase : str ):
        # Sequence-classification head: one logit vector per example.
        lowercase__ = self.num_labels
        lowercase__ = TFLayoutLMForSequenceClassification(config=__lowercase )
        lowercase__ = model(__lowercase, __lowercase, attention_mask=__lowercase, token_type_ids=__lowercase )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
    def A__ ( self : Any, __lowercase : List[str], __lowercase : List[Any], __lowercase : str, __lowercase : int, __lowercase : Tuple, __lowercase : Tuple, __lowercase : Any, __lowercase : Dict ):
        # Token-classification head: one logit vector per token.
        lowercase__ = self.num_labels
        lowercase__ = TFLayoutLMForTokenClassification(config=__lowercase )
        lowercase__ = model(__lowercase, __lowercase, attention_mask=__lowercase, token_type_ids=__lowercase, labels=__lowercase )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
    def A__ ( self : Union[str, Any], __lowercase : int, __lowercase : List[str], __lowercase : int, __lowercase : Optional[int], __lowercase : Dict, __lowercase : Tuple, __lowercase : Any, __lowercase : int ):
        # Question-answering head: start/end position logits per token.
        lowercase__ = TFLayoutLMForQuestionAnswering(config=__lowercase )
        lowercase__ = model(__lowercase, __lowercase, attention_mask=__lowercase, token_type_ids=__lowercase )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
    def A__ ( self : int ):
        # Repackage prepare_config_and_inputs() into the kwargs dict expected
        # by the shared model-test mixin.
        lowercase__ = self.prepare_config_and_inputs()
        (
            (
                lowercase__
            ) , (
                lowercase__
            ) , (
                lowercase__
            ) , (
                lowercase__
            ) , (
                lowercase__
            ) , (
                lowercase__
            ) , (
                lowercase__
            ) , (
                lowercase__
            ) ,
        ) = config_and_inputs
        lowercase__ = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class _snake_case ( lowercase__ , lowercase__ , unittest.TestCase):
    """unittest entry point driving the tester above through the shared
    TFModelTesterMixin / PipelineTesterMixin machinery.

    NOTE(review): machine-obfuscated — the two mixin bases are the undefined
    name ``lowercase__`` (presumably TFModelTesterMixin and
    PipelineTesterMixin from the imports above), every class attribute is
    named ``UpperCamelCase__`` (only the last assignment survives), all test
    methods are named ``A__`` (later defs shadow earlier ones), and ``setUp``
    references ``TFLayoutLMModelTester`` while the tester class was renamed
    ``_snake_case``. Restore the original names before running.
    """
    # Intended: all_model_classes
    UpperCamelCase__ : Optional[Any] =(
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    # Intended: pipeline_model_mapping
    UpperCamelCase__ : Tuple =(
        {
            """feature-extraction""": TFLayoutLMModel,
            """fill-mask""": TFLayoutLMForMaskedLM,
            """text-classification""": TFLayoutLMForSequenceClassification,
            """token-classification""": TFLayoutLMForTokenClassification,
            """zero-shot""": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    UpperCamelCase__ : int =False
    UpperCamelCase__ : Union[str, Any] =True
    UpperCamelCase__ : str =1_0
    def A__ ( self : Optional[Any] ):
        # Build the tester and a ConfigTester for common config checks.
        lowercase__ = TFLayoutLMModelTester(self )
        lowercase__ = ConfigTester(self, config_class=__lowercase, hidden_size=37 )
    def A__ ( self : List[str] ):
        self.config_tester.run_common_tests()
    def A__ ( self : List[str] ):
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowercase )
    def A__ ( self : Tuple ):
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*__lowercase )
    def A__ ( self : Dict ):
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*__lowercase )
    def A__ ( self : Tuple ):
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*__lowercase )
    def A__ ( self : Optional[int] ):
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*__lowercase )
    @slow
    def A__ ( self : Optional[Any] ):
        # Smoke-test loading the first published checkpoint from the Hub.
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase__ = TFLayoutLMModel.from_pretrained(__lowercase )
            self.assertIsNotNone(__lowercase )
    @unittest.skip("Onnx compliancy broke with TF 2.10" )
    def A__ ( self : Dict ):
        pass
def __lowerCAmelCase ( ):
    """Build a fixed batch of 2 LayoutLM sequences for the slow integration tests.

    Returns ``(input_ids, attention_mask, bbox, token_type_ids, labels)`` as TF
    tensors, each with batch size 2 and sequence length 25; ``bbox`` holds one
    normalised ``[x0, y0, x1, y1]`` box per token.

    NOTE(review): callers below invoke ``prepare_layoutlm_batch_inputs()`` but
    the obfuscation renamed this def to ``__lowerCAmelCase`` -- the original
    name needs to be restored for the tests to run.
    """
    # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
    # fmt: off
    lowercase__ = tf.convert_to_tensor([[101,1019,1014,1016,1037,1_2849,4747,1004,1_4246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,1_1300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,1_9274,2772,6205,2_7814,1_6147,1_6147,4343,2047,1_0283,1_0969,1_4389,1012,2338,102]] )  # noqa: E231
    lowercase__ = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] )  # noqa: E231
    lowercase__ = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] )  # noqa: E231
    lowercase__ = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] )  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    lowercase__ = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] )  # noqa: E231
    # fmt: on
    # NOTE(review): the five tensors above were all bound to ``lowercase__``
    # by the obfuscation; the names returned here are consequently undefined.
    return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class _snake_case ( unittest.TestCase):
    """Slow integration tests for TF LayoutLM against pinned reference values.

    NOTE(review): like the rest of this module, intermediate results are bound
    to ``lowercase__`` while subsequent lines read the original names
    (``model``, ``outputs``, ``loss`` ...) or the unbound ``__lowercase`` --
    corrupted by the obfuscation; confirm against the original test file.
    """
    @slow
    def A__ ( self : str ):
        # Base model forward pass: check slices of the sequence and pooled output.
        lowercase__ = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased" )
        lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = prepare_layoutlm_batch_inputs()
        # forward pass
        lowercase__ = model(input_ids=__lowercase, bbox=__lowercase, attention_mask=__lowercase, token_type_ids=__lowercase )
        # test the sequence output on [0, :3, :3]
        lowercase__ = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]], )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], __lowercase, atol=1e-3 ) )
        # test the pooled output on [1, :3]
        lowercase__ = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] )
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], __lowercase, atol=1e-3 ) )
    @slow
    def A__ ( self : Optional[Any] ):
        # initialize model with randomly initialized sequence classification head
        lowercase__ = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2 )
        lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = prepare_layoutlm_batch_inputs()
        # forward pass
        lowercase__ = model(
            input_ids=__lowercase, bbox=__lowercase, attention_mask=__lowercase, token_type_ids=__lowercase, labels=tf.convert_to_tensor([1, 1] ), )
        # test whether we get a loss as a scalar
        lowercase__ = outputs.loss
        lowercase__ = (2,)
        self.assertEqual(loss.shape, __lowercase )
        # test the shape of the logits
        lowercase__ = outputs.logits
        lowercase__ = (2, 2)
        self.assertEqual(logits.shape, __lowercase )
    @slow
    def A__ ( self : List[str] ):
        # initialize model with randomly initialized token classification head
        lowercase__ = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13 )
        lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = prepare_layoutlm_batch_inputs()
        # forward pass
        lowercase__ = model(
            input_ids=__lowercase, bbox=__lowercase, attention_mask=__lowercase, token_type_ids=__lowercase, labels=__lowercase )
        # test the shape of the logits
        lowercase__ = outputs.logits
        lowercase__ = tf.convert_to_tensor((2, 25, 13) )
        self.assertEqual(logits.shape, __lowercase )
    @slow
    def A__ ( self : Optional[Any] ):
        # initialize model with randomly initialized token classification head
        lowercase__ = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased" )
        lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = prepare_layoutlm_batch_inputs()
        # forward pass
        lowercase__ = model(input_ids=__lowercase, bbox=__lowercase, attention_mask=__lowercase, token_type_ids=__lowercase )
        # test the shape of the logits
        lowercase__ = tf.convert_to_tensor((2, 25) )
        self.assertEqual(outputs.start_logits.shape, __lowercase )
        self.assertEqual(outputs.end_logits.shape, __lowercase )
| 709
|
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class _snake_case ( lowercase__):
    """SentencePiece-style Unigram tokenizer built on the HF ``tokenizers`` library.

    Intended pipeline: NMT/NFKC/lowercase normalisation, Metaspace + digit +
    punctuation pre-tokenisation, a trainable Unigram model, Metaspace decoding
    and an ``$A </s>`` post-processing template.

    NOTE(review): corrupted by an automated rename -- ``__init__`` repeats the
    parameter name ``__lowercase`` five times (a SyntaxError), and the body
    reads ``pad_token``/``eos_token``/``unk_token``/``replacement``/
    ``add_prefix_space`` which are never bound. Restore the original parameter
    names before use.
    """
    def __init__( self : Optional[Any], __lowercase : str = "▁", __lowercase : bool = True, __lowercase : Union[str, AddedToken] = "<unk>", __lowercase : Union[str, AddedToken] = "</s>", __lowercase : Union[str, AddedToken] = "<pad>", ):
        # Fixed special-token ids: pad=0, eos=1, unk=2.
        lowercase__ = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }
        # Flat list of special tokens indexed by their id (for the trainer).
        lowercase__ = [None] * len(self.special_tokens )
        for token_dict in self.special_tokens.values():
            lowercase__ = token_dict["token"]
        lowercase__ = Tokenizer(Unigram() )
        # Normalise: NMT cleanup, NFKC, collapse runs of spaces, lowercase.
        lowercase__ = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}" ), " " ),
                normalizers.Lowercase(),
            ] )
        # Pre-tokenise on metaspace, split individual digits and punctuation.
        lowercase__ = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=__lowercase, add_prefix_space=__lowercase ),
                pre_tokenizers.Digits(individual_digits=__lowercase ),
                pre_tokenizers.Punctuation(),
            ] )
        lowercase__ = decoders.Metaspace(replacement=__lowercase, add_prefix_space=__lowercase )
        # Append the EOS token to every single sequence.
        lowercase__ = TemplateProcessing(
            single=F'''$A {self.special_tokens["eos"]["token"]}''', special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])], )
        lowercase__ = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }
        super().__init__(__lowercase, __lowercase )
    def A__ ( self : Union[str, Any], __lowercase : Union[str, List[str]], __lowercase : int = 8000, __lowercase : bool = True, ):
        # Train the Unigram model from one or more text files, then restore the
        # unk id (lost during training).
        lowercase__ = trainers.UnigramTrainer(
            vocab_size=__lowercase, special_tokens=self.special_tokens_list, show_progress=__lowercase, )
        if isinstance(__lowercase, __lowercase ):
            lowercase__ = [files]
        self._tokenizer.train(__lowercase, trainer=__lowercase )
        self.add_unk_id()
    def A__ ( self : List[Any], __lowercase : Union[Iterator[str], Iterator[Iterator[str]]], __lowercase : int = 8000, __lowercase : bool = True, ):
        # Same as above but trains from an in-memory iterator of texts.
        lowercase__ = trainers.UnigramTrainer(
            vocab_size=__lowercase, special_tokens=self.special_tokens_list, show_progress=__lowercase, )
        self._tokenizer.train_from_iterator(__lowercase, trainer=__lowercase )
        self.add_unk_id()
    def A__ ( self : str ):
        # Patch the serialised tokenizer JSON so the model's unk_id points at
        # our "<unk>" special token, then reload it.
        lowercase__ = json.loads(self._tokenizer.to_str() )
        lowercase__ = self.special_tokens["unk"]["id"]
        lowercase__ = Tokenizer.from_str(json.dumps(__lowercase ) )
| 37
| 0
|
from math import ceil
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ = 1001 ):
    """Sum the numbers on both diagonals of an n-by-n clockwise number spiral
    (Project Euler problem 28).

    Bug fix: the obfuscated original bound every intermediate to the throwaway
    name ``lowercase__`` while the loop read ``n``/``total``/``odd``/``even``,
    raising NameError on any call. Bindings restored below.

    :param SCREAMING_SNAKE_CASE_: odd side length of the spiral (default 1001)
    :return: sum of the diagonal values
    """
    total = 1  # the centre of the spiral
    for i in range(1 , int(ceil(SCREAMING_SNAKE_CASE_ / 2.0 ) ) ):
        odd = 2 * i + 1   # side length of ring i; its top-right corner is odd**2
        even = 2 * i
        # the four corners of ring i sum to 4*odd**2 - 6*even
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
    import sys

    # CLI: no argument -> default spiral size (1001); otherwise use argv[1].
    if len(sys.argv) == 1:
        print(__lowerCAmelCase())
    else:
        try:
            # bug fix: the parsed integer was bound to ``lowercase_`` while the
            # call read undefined names ``solution``/``n``; call the sibling
            # function defined above with the parsed value instead.
            lowercase_ = int(sys.argv[1])
            print(__lowerCAmelCase(lowercase_))
        except ValueError:
            print("""Invalid entry - please enter a number""")
| 710
|
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def __lowerCAmelCase ( model_card_dir , src_lang , tgt_lang ):
    """Generate and write the README.md model card for one facebook/wmt19-* pair.

    Bug fixes vs. the corrupted original:
    * all three parameters were named ``SCREAMING_SNAKE_CASE_`` -- a duplicate
      parameter name is a SyntaxError; restored to distinct names matching the
      keyword arguments used by the call site at the bottom of this file;
    * intermediates were bound to ``lowercase__`` while the f-string read the
      undefined names ``texts``/``scores``/``pair``/``path``;
    * ``os.makedirs`` received the path again as ``exist_ok``.

    :param model_card_dir: directory README.md is written into (created if missing)
    :param src_lang: source language code, e.g. "en"
    :param tgt_lang: target language code, e.g. "ru"
    """
    # Example sentences interpolated into the usage snippet of the card.
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }
    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f'''{src_lang}-{tgt_lang}'''
    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
    os.makedirs(model_card_dir , exist_ok=True )
    path = os.path.join(model_card_dir , "README.md" )
    print(f'''Generating {path}''' )
    with open(path , "w" , encoding="utf-8" ) as f:
        f.write(readme )
# make sure we are under the root of the project
# bug fix: the obfuscation bound every result to the throwaway name
# ``lowercase_`` while the lines below read ``repo_dir``/``model_cards_dir``/
# ``src_lang``/``tgt_lang``/``model_card_dir`` and called the undefined name
# ``write_model_card`` (the card writer above is named ``__lowerCAmelCase``).
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / """model_cards"""
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    # e.g. "wmt19-ru-en" -> ("wmt19", "ru", "en")
    _prefix , src_lang , tgt_lang = model_name.split("""-""")
    model_card_dir = model_cards_dir / """facebook""" / model_name
    __lowerCAmelCase(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 37
| 0
|
import pickle
import numpy as np
from matplotlib import pyplot as plt
class _snake_case :
    """Toy convolutional neural network (one conv layer + average/max pooling +
    a two-layer fully-connected BP network), implemented with plain numpy.

    NOTE(review): heavily corrupted by an automated rename:
    * several ``def`` signatures repeat the parameter name ``__lowercase``,
      which is a SyntaxError in Python;
    * assignments that originally targeted ``self.<attr>`` or descriptive
      locals were rewritten to the throwaway ``lowercase__``, so names such as
      ``bp_numa``, ``conva_get``, ``self.num_bpa``, ``self.w_conva`` etc. are
      read without ever being bound.
    The comments below document the *intended* behaviour; confirm everything
    against the original implementation before relying on it.
    """
    def __init__( self : Tuple, __lowercase : Union[str, Any], __lowercase : int, __lowercase : Union[str, Any], __lowercase : str, __lowercase : List[Any], __lowercase : List[str]=0.2, __lowercase : List[str]=0.2 ):
        # Intended: store the three BP layer sizes, the conv kernel spec
        # (size, step, count), the pooling size and the two learning rates,
        # then randomly initialise kernels/weights in [-0.5, 0.5] and
        # thresholds in [-1, 1].
        lowercase__ = bp_numa
        lowercase__ = bp_numa
        lowercase__ = bp_numa
        lowercase__ = conva_get[:2]
        lowercase__ = conva_get[2]
        lowercase__ = size_pa
        lowercase__ = rate_w
        lowercase__ = rate_t
        lowercase__ = [
            np.mat(-1 * np.random.rand(self.conva[0], self.conva[0] ) + 0.5 )
            for i in range(self.conva[1] )
        ]
        lowercase__ = np.mat(-1 * np.random.rand(self.num_bpa, self.num_bpa ) + 0.5 )
        lowercase__ = np.mat(-1 * np.random.rand(self.num_bpa, self.num_bpa ) + 0.5 )
        lowercase__ = -2 * np.random.rand(self.conva[1] ) + 1
        lowercase__ = -2 * np.random.rand(self.num_bpa ) + 1
        lowercase__ = -2 * np.random.rand(self.num_bpa ) + 1
    def A__ ( self : Any, __lowercase : List[str] ):
        # save model dict with pickle
        lowercase__ = {
            "num_bp1": self.num_bpa,
            "num_bp2": self.num_bpa,
            "num_bp3": self.num_bpa,
            "conv1": self.conva,
            "step_conv1": self.step_conva,
            "size_pooling1": self.size_poolinga,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conva,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conva,
            "thre_bp2": self.thre_bpa,
            "thre_bp3": self.thre_bpa,
        }
        with open(__lowercase, "wb" ) as f:
            pickle.dump(__lowercase, __lowercase )
        print(F'''Model saved: {save_path}''' )
    @classmethod
    def A__ ( cls : Dict, __lowercase : Union[str, Any] ):
        # read saved model
        # NOTE(review): unpickling is only safe for trusted files (see the
        # ``# noqa: S301`` below).
        with open(__lowercase, "rb" ) as f:
            lowercase__ = pickle.load(__lowercase )  # noqa: S301
        lowercase__ = model_dic.get("conv1" )
        conv_get.append(model_dic.get("step_conv1" ) )
        lowercase__ = model_dic.get("size_pooling1" )
        lowercase__ = model_dic.get("num_bp1" )
        lowercase__ = model_dic.get("num_bp2" )
        lowercase__ = model_dic.get("num_bp3" )
        lowercase__ = model_dic.get("rate_weight" )
        lowercase__ = model_dic.get("rate_thre" )
        # create model instance
        lowercase__ = CNN(__lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase )
        # modify model parameter
        lowercase__ = model_dic.get("w_conv1" )
        lowercase__ = model_dic.get("wkj" )
        lowercase__ = model_dic.get("vji" )
        lowercase__ = model_dic.get("thre_conv1" )
        lowercase__ = model_dic.get("thre_bp2" )
        lowercase__ = model_dic.get("thre_bp3" )
        return conv_ins
    def A__ ( self : str, __lowercase : List[Any] ):
        # Sigmoid activation.
        return 1 / (1 + np.exp(-1 * x ))
    def A__ ( self : List[str], __lowercase : Optional[Any] ):
        # Round to 3 decimal places (used when formatting predictions).
        return round(__lowercase, 3 )
    def A__ ( self : Optional[Any], __lowercase : Dict, __lowercase : Optional[int], __lowercase : Optional[int], __lowercase : Optional[Any], __lowercase : str ):
        # convolution process
        lowercase__ = convs[0]
        lowercase__ = convs[1]
        lowercase__ = np.shape(__lowercase )[0]
        # get the data slice of original image data, data_focus
        lowercase__ = []
        for i_focus in range(0, size_data - size_conv + 1, __lowercase ):
            for j_focus in range(0, size_data - size_conv + 1, __lowercase ):
                lowercase__ = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(__lowercase )
        # calculate the feature map of every single kernel, and saved as list of matrix
        lowercase__ = []
        lowercase__ = int((size_data - size_conv) / conv_step + 1 )
        for i_map in range(__lowercase ):
            lowercase__ = []
            for i_focus in range(len(__lowercase ) ):
                lowercase__ = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map] ) )
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(__lowercase ) )
            lowercase__ = np.asmatrix(__lowercase ).reshape(
                __lowercase, __lowercase )
            data_featuremap.append(__lowercase )
        # expanding the data slice to One dimenssion
        lowercase__ = []
        for each_focus in data_focus:
            focusa_list.extend(self.Expand_Mat(__lowercase ) )
        lowercase__ = np.asarray(__lowercase )
        return focus_list, data_featuremap
    def A__ ( self : List[Any], __lowercase : Any, __lowercase : List[Any], __lowercase : Union[str, Any]="average_pool" ):
        # pooling process
        lowercase__ = len(featuremaps[0] )
        lowercase__ = int(size_map / size_pooling )
        lowercase__ = []
        for i_map in range(len(__lowercase ) ):
            lowercase__ = featuremaps[i_map]
            lowercase__ = []
            for i_focus in range(0, __lowercase, __lowercase ):
                for j_focus in range(0, __lowercase, __lowercase ):
                    lowercase__ = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(__lowercase ) )
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(__lowercase ) )
            lowercase__ = np.asmatrix(__lowercase ).reshape(__lowercase, __lowercase )
            featuremap_pooled.append(__lowercase )
        return featuremap_pooled
    def A__ ( self : str, __lowercase : Optional[Any] ):
        # expanding three dimension data to one dimension list
        lowercase__ = []
        for i in range(len(__lowercase ) ):
            lowercase__ = np.shape(data[i] )
            lowercase__ = data[i].reshape(1, shapes[0] * shapes[1] )
            lowercase__ = data_listed.getA().tolist()[0]
            data_expanded.extend(__lowercase )
        lowercase__ = np.asarray(__lowercase )
        return data_expanded
    def A__ ( self : Optional[int], __lowercase : Optional[int] ):
        # expanding matrix to one dimension list
        lowercase__ = np.asarray(__lowercase )
        lowercase__ = np.shape(__lowercase )
        lowercase__ = data_mat.reshape(1, shapes[0] * shapes[1] )
        return data_expanded
    def A__ ( self : str, __lowercase : Tuple, __lowercase : List[Any], __lowercase : Any, __lowercase : Union[str, Any], __lowercase : Tuple ):
        # Intended: up-sample the pooled gradients back to feature-map size and
        # multiply by the sigmoid derivative of the conv output.
        lowercase__ = []
        lowercase__ = 0
        for i_map in range(__lowercase ):
            lowercase__ = np.ones((size_map, size_map) )
            for i in range(0, __lowercase, __lowercase ):
                for j in range(0, __lowercase, __lowercase ):
                    lowercase__ = pd_pool[
                        i_pool
                    ]
                    lowercase__ = i_pool + 1
            lowercase__ = np.multiply(
                __lowercase, np.multiply(out_map[i_map], (1 - out_map[i_map]) ) )
            pd_all.append(__lowercase )
        return pd_all
    def A__ ( self : Tuple, __lowercase : int, __lowercase : Optional[Any], __lowercase : List[Any], __lowercase : Optional[Any], __lowercase : List[Any], __lowercase : List[str]=bool ):
        # model traning
        # NOTE(review): the last default (``=bool``) looks like a corrupted
        # ``draw_e=bool`` flag -- as written it is always truthy.
        print("----------------------Start Training-------------------------" )
        print((" - - Shape: Train_Data ", np.shape(__lowercase )) )
        print((" - - Shape: Teach_Data ", np.shape(__lowercase )) )
        lowercase__ = 0
        lowercase__ = []
        lowercase__ = 1_0000
        while rp < n_repeat and mse >= error_accuracy:
            lowercase__ = 0
            print(F'''-------------Learning Time {rp}--------------''' )
            for p in range(len(__lowercase ) ):
                # print('------------Learning Image: %d--------------'%p)
                lowercase__ = np.asmatrix(datas_train[p] )
                lowercase__ = np.asarray(datas_teach[p] )
                lowercase__ , lowercase__ = self.convolute(
                    __lowercase, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, )
                lowercase__ = self.pooling(__lowercase, self.size_poolinga )
                lowercase__ = np.shape(__lowercase )
                lowercase__ = self._expand(__lowercase )
                lowercase__ = data_bp_input
                lowercase__ = np.dot(__lowercase, self.vji.T ) - self.thre_bpa
                lowercase__ = self.sig(__lowercase )
                lowercase__ = np.dot(__lowercase, self.wkj.T ) - self.thre_bpa
                lowercase__ = self.sig(__lowercase )
                # --------------Model Leaning ------------------------
                # calculate error and gradient---------------
                lowercase__ = np.multiply(
                    (data_teach - bp_outa), np.multiply(__lowercase, (1 - bp_outa) ) )
                lowercase__ = np.multiply(
                    np.dot(__lowercase, self.wkj ), np.multiply(__lowercase, (1 - bp_outa) ) )
                lowercase__ = np.dot(__lowercase, self.vji )
                lowercase__ = pd_i_all / (self.size_poolinga * self.size_poolinga)
                lowercase__ = pd_conva_pooled.T.getA().tolist()
                lowercase__ = self._calculate_gradient_from_pool(
                    __lowercase, __lowercase, shape_featuremapa[0], shape_featuremapa[1], self.size_poolinga, )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conva[1] ):
                    lowercase__ = self._expand_mat(pd_conva_all[k_conv] )
                    lowercase__ = self.rate_weight * np.dot(__lowercase, __lowercase )
                    lowercase__ = self.w_conva[k_conv] + delta_w.reshape(
                        (self.conva[0], self.conva[0]) )
                    lowercase__ = (
                        self.thre_conva[k_conv]
                        - np.sum(pd_conva_all[k_conv] ) * self.rate_thre
                    )
                # all connected layer
                lowercase__ = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
                lowercase__ = self.vji + pd_j_all.T * bp_outa * self.rate_weight
                lowercase__ = self.thre_bpa - pd_k_all * self.rate_thre
                lowercase__ = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                lowercase__ = np.sum(abs(data_teach - bp_outa ) )
                error_count += errors
                # print('   ----Teach      ',data_teach)
                # print('   ----BP_output  ',bp_out3)
            lowercase__ = rp + 1
            lowercase__ = error_count / patterns
            all_mse.append(__lowercase )
        def draw_error():
            # Plot per-epoch MSE against the requested error accuracy.
            lowercase__ = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
            plt.plot(__lowercase, "+-" )
            plt.plot(__lowercase, "r--" )
            plt.xlabel("Learning Times" )
            plt.ylabel("All_mse" )
            plt.grid(__lowercase, alpha=0.5 )
            plt.show()
        print("------------------Training Complished---------------------" )
        print((" - - Training epoch: ", rp, F''' - - Mse: {mse:.6f}''') )
        if draw_e:
            draw_error()
        return mse
    def A__ ( self : List[str], __lowercase : Optional[int] ):
        # model predict
        lowercase__ = []
        print("-------------------Start Testing-------------------------" )
        print((" - - Shape: Test_Data ", np.shape(__lowercase )) )
        for p in range(len(__lowercase ) ):
            lowercase__ = np.asmatrix(datas_test[p] )
            lowercase__ , lowercase__ = self.convolute(
                __lowercase, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, )
            lowercase__ = self.pooling(__lowercase, self.size_poolinga )
            lowercase__ = self._expand(__lowercase )
            lowercase__ = data_bp_input
            lowercase__ = bp_outa * self.vji.T - self.thre_bpa
            lowercase__ = self.sig(__lowercase )
            lowercase__ = bp_outa * self.wkj.T - self.thre_bpa
            lowercase__ = self.sig(__lowercase )
            produce_out.extend(bp_outa.getA().tolist() )
        lowercase__ = [list(map(self.do_round, __lowercase ) ) for each in produce_out]
        return np.asarray(__lowercase )
    def A__ ( self : int, __lowercase : Any ):
        # return the data of image after convoluting process so we can check it out
        lowercase__ = np.asmatrix(__lowercase )
        lowercase__ , lowercase__ = self.convolute(
            __lowercase, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, )
        lowercase__ = self.pooling(__lowercase, self.size_poolinga )
        return data_conveda, data_pooleda
if __name__ == "__main__":
    # No demo/training entry point is provided; this module is import-only.
    pass
| 711
|
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _snake_case ( lowercase__ , unittest.TestCase):
    """Tokenizer test suite for TransfoXLTokenizer (word-level vocab).

    NOTE(review): corrupted by an automated rename -- the three class
    attributes below share one name (each assignment overwrites the previous),
    and method locals are bound to ``lowercase__`` while later lines read the
    original names (``self.vocab_file``, ``tokenizer`` ...). Confirm against
    the original test module.
    """
    UpperCamelCase__ : Dict =TransfoXLTokenizer
    UpperCamelCase__ : List[Any] =False
    UpperCamelCase__ : List[Any] =False
    def A__ ( self : Union[str, Any] ):
        # setUp: write a tiny vocab file into the mixin's temp directory.
        super().setUp()
        lowercase__ = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file, "w", encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def A__ ( self : Union[str, Any], **__lowercase : Any ):
        # Factory used by the common tests; forces lower_case=True.
        lowercase__ = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **__lowercase )
    def A__ ( self : Tuple, __lowercase : Optional[int] ):
        # Provide an (input, expected output) pair for the round-trip test.
        lowercase__ = "<unk> UNwanted , running"
        lowercase__ = "<unk> unwanted, running"
        return input_text, output_text
    def A__ ( self : str ):
        # Lower-cased tokenisation and token -> id conversion.
        lowercase__ = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=__lowercase )
        lowercase__ = tokenizer.tokenize("<unk> UNwanted , running" )
        self.assertListEqual(__lowercase, ["<unk>", "unwanted", ",", "running"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ), [0, 4, 8, 7] )
    def A__ ( self : Tuple ):
        # Lower-casing normalises mixed case and collapses whitespace.
        lowercase__ = TransfoXLTokenizer(lower_case=__lowercase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  " ), ["hello", "!", "how", "are", "you", "?"] )
    def A__ ( self : Tuple ):
        # Without lower-casing the original case is preserved.
        lowercase__ = TransfoXLTokenizer(lower_case=__lowercase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  " ), ["HeLLo", "!", "how", "Are", "yoU", "?"] )
    def A__ ( self : str ):
        # Moses-style punctuation splitting with @-@ / @,@ / @.@ placeholders,
        # and the inverse via convert_tokens_to_string.
        lowercase__ = TransfoXLTokenizer(lower_case=__lowercase )
        lowercase__ = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        lowercase__ = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]
        self.assertListEqual(tokenizer.tokenize(__lowercase ), __lowercase )
        self.assertEqual(tokenizer.convert_tokens_to_string(__lowercase ), __lowercase )
    def A__ ( self : List[str] ):
        # move_added_token should relocate (not duplicate) an added token.
        lowercase__ = self.get_tokenizer()
        lowercase__ = len(__lowercase )
        tokenizer.add_tokens(["new1", "new2"] )
        tokenizer.move_added_token("new1", 1 )
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(__lowercase ), original_len + 2 )
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1" ), [1] )
        self.assertEqual(tokenizer.decode([1] ), "new1" )
| 37
| 0
|
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
    """Convert an original mLUKE checkpoint to the HF LukeForMaskedLM format,
    verify it against pinned reference values, and save model + tokenizer.

    NOTE(review): corrupted by an automated rename -- all five parameters share
    the name ``SCREAMING_SNAKE_CASE_`` (a duplicate parameter name is a
    SyntaxError), and intermediates are bound to ``lowercase__`` while later
    lines read the original names (``config``, ``state_dict``, ``tokenizer``,
    ``model`` ...). The original signature was
    ``(checkpoint_path, metadata_path, entity_vocab_path,
    pytorch_dump_folder_path, model_size)`` per the argparse block below --
    confirm before use.
    """
    # Load configuration defined in the metadata file
    with open(SCREAMING_SNAKE_CASE_ ) as metadata_file:
        lowercase__ = json.load(SCREAMING_SNAKE_CASE_ )
    lowercase__ = LukeConfig(use_entity_aware_attention=SCREAMING_SNAKE_CASE_ , **metadata["model_config"] )
    # Load in the weights from the checkpoint_path
    lowercase__ = torch.load(SCREAMING_SNAKE_CASE_ , map_location="cpu" )["module"]
    # Load the entity vocab file
    lowercase__ = load_original_entity_vocab(SCREAMING_SNAKE_CASE_ )
    # add an entry for [MASK2]
    lowercase__ = max(entity_vocab.values() ) + 1
    config.entity_vocab_size += 1
    lowercase__ = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
    # Add special tokens to the token vocabulary for downstream tasks
    lowercase__ = AddedToken("<ent>" , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ )
    lowercase__ = AddedToken("<ent2>" , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ )
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
    config.vocab_size += 2
    print(f'''Saving tokenizer to {pytorch_dump_folder_path}''' )
    tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
    # Rewrite the saved tokenizer config to use the MLuke tokenizer class.
    with open(os.path.join(SCREAMING_SNAKE_CASE_ , "tokenizer_config.json" ) , "r" ) as f:
        lowercase__ = json.load(SCREAMING_SNAKE_CASE_ )
    lowercase__ = "MLukeTokenizer"
    with open(os.path.join(SCREAMING_SNAKE_CASE_ , "tokenizer_config.json" ) , "w" ) as f:
        json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    with open(os.path.join(SCREAMING_SNAKE_CASE_ , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
        json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    lowercase__ = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
    # Initialize the embeddings of the special tokens
    lowercase__ = tokenizer.convert_tokens_to_ids(["@"] )[0]
    lowercase__ = tokenizer.convert_tokens_to_ids(["#"] )[0]
    lowercase__ = state_dict["embeddings.word_embeddings.weight"]
    lowercase__ = word_emb[ent_init_index].unsqueeze(0 )
    lowercase__ = word_emb[enta_init_index].unsqueeze(0 )
    lowercase__ = torch.cat([word_emb, ent_emb, enta_emb] )
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        lowercase__ = state_dict[bias_name]
        lowercase__ = decoder_bias[ent_init_index].unsqueeze(0 )
        lowercase__ = decoder_bias[enta_init_index].unsqueeze(0 )
        lowercase__ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            lowercase__ = f'''encoder.layer.{layer_index}.attention.self.'''
            lowercase__ = state_dict[prefix + matrix_name]
            lowercase__ = state_dict[prefix + matrix_name]
            lowercase__ = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    lowercase__ = state_dict["entity_embeddings.entity_embeddings.weight"]
    lowercase__ = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
    lowercase__ = torch.cat([entity_emb, entity_mask_emb] )
    # add [MASK2] for 'entity_predictions.bias'
    lowercase__ = state_dict["entity_predictions.bias"]
    lowercase__ = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
    lowercase__ = torch.cat([entity_prediction_bias, entity_mask_bias] )
    lowercase__ = LukeForMaskedLM(config=SCREAMING_SNAKE_CASE_ ).eval()
    # The tied decoder weights/biases are re-created by tie_weights() below.
    state_dict.pop("entity_predictions.decoder.weight" )
    state_dict.pop("lm_head.decoder.weight" )
    state_dict.pop("lm_head.decoder.bias" )
    lowercase__ = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
            lowercase__ = state_dict[key]
        else:
            lowercase__ = state_dict[key]
    lowercase__ , lowercase__ = model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
    # Only the position-ids buffer may be unexpected; only tied weights may be missing.
    if set(SCREAMING_SNAKE_CASE_ ) != {"luke.embeddings.position_ids"}:
        raise ValueError(f'''Unexpected unexpected_keys: {unexpected_keys}''' )
    if set(SCREAMING_SNAKE_CASE_ ) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f'''Unexpected missing_keys: {missing_keys}''' )
    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    lowercase__ = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ , task="entity_classification" )
    lowercase__ = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    lowercase__ = (0, 9)
    lowercase__ = tokenizer(SCREAMING_SNAKE_CASE_ , entity_spans=[span] , return_tensors="pt" )
    lowercase__ = model(**SCREAMING_SNAKE_CASE_ )
    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        lowercase__ = torch.Size((1, 33, 768) )
        lowercase__ = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        lowercase__ = torch.Size((1, 1, 768) )
        lowercase__ = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
            f''' {expected_shape}''' )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ):
        raise ValueError
    # Verify masked word/entity prediction
    lowercase__ = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
    lowercase__ = "Tokyo is the capital of <mask>."
    lowercase__ = (24, 30)
    lowercase__ = tokenizer(SCREAMING_SNAKE_CASE_ , entity_spans=[span] , return_tensors="pt" )
    lowercase__ = model(**SCREAMING_SNAKE_CASE_ )
    lowercase__ = encoding["input_ids"][0].tolist()
    lowercase__ = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
    lowercase__ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
    assert "Japan" == tokenizer.decode(SCREAMING_SNAKE_CASE_ )
    lowercase__ = outputs.entity_logits[0][0].argmax().item()
    lowercase__ = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(SCREAMING_SNAKE_CASE_ ) )
    model.save_pretrained(SCREAMING_SNAKE_CASE_ )
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    """Load the original LUKE entity vocabulary (a JSON-lines file) and build a
    mapping usable by the multilingual tokenizer.

    Args:
        SCREAMING_SNAKE_CASE_: path to the original entity vocab file, one JSON
            object per line with keys ``"id"`` and ``"entities"`` (a list of
            ``[entity_name, language]`` pairs).

    Returns:
        dict mapping ``"<language>:<entity name>"`` (or the bare special token
        such as ``"[MASK]"``) to the integer entity id.
    """
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]
    # Each line of the file is an independent JSON object (fixes the original,
    # which called json.loads on the *path* for every line and leaked the file).
    with open(SCREAMING_SNAKE_CASE_, encoding="utf-8") as f:
        data = [json.loads(line) for line in f]
    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            # Special tokens are stored once, without a language prefix.
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
# Script entry point: build the CLI for converting an mLUKE checkpoint to the
# HuggingFace format and run the conversion with the parsed arguments.
# NOTE(review): the parser is assigned to `lowercase_` but then used as
# `parser`, and the parsed namespace is assigned to `lowercase_` but used as
# `args`; `convert_luke_checkpoint` is also not defined under that name in this
# file — these look like mechanical-renaming artifacts; confirm intended names.
if __name__ == "__main__":
    lowercase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
    parser.add_argument(
        """--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
    )
    parser.add_argument(
        """--entity_vocab_path""",
        default=None,
        type=str,
        help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
    )
    parser.add_argument(
        """--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
    )
    lowercase_ = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
| 712
|
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def __lowerCAmelCase ( ):
    """Entry point of the TensorFlow benchmarking script.

    Parses `TensorFlowBenchmarkArguments` from the command line, maps the
    legacy ``--no_<flag>`` spelling onto a helpful error message pointing at
    the new ``--no-<flag>`` spelling, and runs the benchmark.

    Raises:
        ValueError: if deprecated or unknown arguments were passed.
    """
    # Fixes the original, which passed the undefined name SCREAMING_SNAKE_CASE_
    # to HfArgumentParser instead of the dataclass imported above.
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        # NOTE(review): eval() on the parser's error text is fragile/unsafe; it
        # relies on the message's last token being a Python list literal of the
        # offending argument strings.
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


# Alias so the `__main__` guard below (which calls `main()`) resolves.
main = __lowerCAmelCase
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this module — the entry point
    # above is named `__lowerCAmelCase`; confirm the intended entry-point name.
    main()
| 37
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
lowercase_ = logging.get_logger(__name__)
# Map from checkpoint identifier to the URL of its config.json.
lowercase_ = {
    """facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/config.json""",
    # See all XGLM models at https://huggingface.co/models?filter=xglm
}
class _snake_case ( lowercase__):
    """Configuration for the XGLM model.

    Stores the hyper-parameters defining the architecture; the defaults
    reproduce the facebook/xglm-564M checkpoint.
    """

    UpperCamelCase__ : List[str] ="""xglm"""
    UpperCamelCase__ : Optional[Any] =["""past_key_values"""]
    UpperCamelCase__ : int ={
        """num_attention_heads""": """attention_heads""",
        """hidden_size""": """d_model""",
        """num_hidden_layers""": """num_layers""",
    }

    # Fixes the original __init__, whose parameters all shared the single name
    # `__lowercase` (a SyntaxError); the real names are recovered from the
    # right-hand sides of the attribute assignments in the original body.
    def __init__(
        self,
        vocab_size=25_6008,
        max_position_embeddings=2048,
        d_model=1024,
        ffn_dim=4096,
        num_layers=24,
        attention_heads=16,
        activation_function="gelu",
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        layerdrop=0.0,
        init_std=0.02,
        scale_embedding=True,
        use_cache=True,
        decoder_start_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 713
|
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
lowercase_ = """<<<<<<< This should probably be modified because it mentions: """
lowercase_ = """=======
>>>>>>>
"""
lowercase_ = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
lowercase_ = [
# (pattern, replacement)
# Order is important here for some replacements
(r"""tfds\.core""", r"""datasets"""),
(r"""tf\.io\.gfile\.GFile""", r"""open"""),
(r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""),
(r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""),
(r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""),
(r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""),
(r"""tfds\.features\.FeaturesDict\(""", r"""dict("""),
(r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(r"""tfds\.""", r"""datasets."""),
(r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""),
(r"""self\.builder_config""", r"""self.config"""),
]
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    """Factory wired into ``set_defaults(func=...)``: build the convert command
    from the parsed CLI namespace.

    NOTE(review): the body references ``ConvertCommand`` and ``args``, neither
    of which is bound here (the command class below is named ``_snake_case``
    and the parameter is ``SCREAMING_SNAKE_CASE_``) — renaming artifacts;
    confirm the intended names.
    """
    return ConvertCommand(args.tfds_path , args.datasets_directory )
class _snake_case ( lowercase__):
    """CLI command converting a TensorFlow Datasets (TFDS) dataset script into a
    HuggingFace ``datasets`` script.

    NOTE(review): two methods below are both named ``A__`` (so the second
    clobbers the first), the ``__init__`` declares two parameters sharing the
    name ``__lowercase`` (a SyntaxError), and many locals/arguments are
    collapsed onto ``lowercase__``/``__lowercase`` — mechanical-renaming
    artifacts; the original names must be restored before this can run.
    """

    @staticmethod
    def A__ ( __lowercase : ArgumentParser ):
        """Register the ``convert`` sub-command and its arguments on the CLI
        parser (``parser``/``train_parser`` are unbound here — see class note)."""
        lowercase__ = parser.add_parser(
            "convert", help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.", )
        train_parser.add_argument(
            "--tfds_path", type=__lowercase, required=__lowercase, help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.", )
        train_parser.add_argument(
            "--datasets_directory", type=__lowercase, required=__lowercase, help="Path to the HuggingFace Datasets folder." )
        train_parser.set_defaults(func=__lowercase )

    def __init__( self : Tuple, __lowercase : str, __lowercase : str, *__lowercase : Tuple ):
        # Store source path and destination directory for the conversion run.
        lowercase__ = get_logger("datasets-cli/converting" )
        lowercase__ = tfds_path
        lowercase__ = datasets_directory

    def A__ ( self : Any ):
        """Run the conversion: rewrite each TFDS file via the TO_CONVERT regex
        table, group builder scripts into per-dataset directories, and copy
        shared utility files next to the builders that import them."""
        if os.path.isdir(self._tfds_path ):
            lowercase__ = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            lowercase__ = os.path.dirname(self._tfds_path )
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path." )
        lowercase__ = os.path.abspath(self._datasets_directory )
        self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
        lowercase__ = []
        lowercase__ = []
        lowercase__ = {}
        if os.path.isdir(self._tfds_path ):
            lowercase__ = os.listdir(__lowercase )
        else:
            lowercase__ = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(F'''Looking at file {f_name}''' )
            # NOTE(review): both joins use the same (unbound) operands —
            # presumably input and output file paths; confirm the originals.
            lowercase__ = os.path.join(__lowercase, __lowercase )
            lowercase__ = os.path.join(__lowercase, __lowercase )
            if not os.path.isfile(__lowercase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file" )
                continue
            with open(__lowercase, encoding="utf-8" ) as f:
                lowercase__ = f.readlines()
            lowercase__ = []
            lowercase__ = False
            lowercase__ = False
            lowercase__ = []
            for line in lines:
                lowercase__ = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    lowercase__ = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    lowercase__ = ""
                    continue
                elif "from absl import logging" in out_line:
                    lowercase__ = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    lowercase__ = out_line.replace("getLogger", "get_logger" )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    lowercase__ = True
                    # NOTE(review): the lambda's parameter is named `__lowercase`
                    # but its body tests `e` — likely meant `lambda e: e in out_line`.
                    lowercase__ = list(filter(lambda __lowercase : e in out_line, __lowercase ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(__lowercase ) + "\n" )
                    out_lines.append(__lowercase )
                    out_lines.append(__lowercase )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        lowercase__ = re.sub(__lowercase, __lowercase, __lowercase )
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    lowercase__ = re.match(R"from\stensorflow_datasets.*import\s([^\.\r\n]+)", __lowercase )
                    tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) )
                    lowercase__ = "from . import " + match.group(1 )
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(F'''Error converting {out_line.strip()}''' )
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    lowercase__ = True
                out_lines.append(__lowercase )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                lowercase__ = f_name.replace(".py", "" )
                lowercase__ = os.path.join(__lowercase, __lowercase )
                lowercase__ = os.path.join(__lowercase, __lowercase )
                os.makedirs(__lowercase, exist_ok=__lowercase )
                self._logger.info(F'''Adding directory {output_dir}''' )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(__lowercase )
            if needs_manual_update:
                with_manual_update.append(__lowercase )
            with open(__lowercase, "w", encoding="utf-8" ) as f:
                f.writelines(__lowercase )
            self._logger.info(F'''Converted in {output_file}''' )
        for utils_file in utils_files:
            try:
                # Copy each shared utility next to the builder that imports it.
                lowercase__ = os.path.basename(__lowercase )
                lowercase__ = imports_to_builder_map[f_name.replace(".py", "" )]
                self._logger.info(F'''Moving {dest_folder} to {utils_file}''' )
                shutil.copy(__lowercase, __lowercase )
            except KeyError:
                self._logger.error(F'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
| 37
| 0
|
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    """Compute the prefix function (KMP failure function) of a string.

    ``result[i]`` is the length of the longest proper prefix of
    ``s[: i + 1]`` that is also a suffix of it.

    Args:
        SCREAMING_SNAKE_CASE_: the input string (or any indexable sequence).

    Returns:
        list[int] of the same length as the input ([] for empty input).
    """
    # Fixes the original, whose locals were all collapsed onto one name so
    # `prefix_result`, `input_string` and `j` were unbound.
    prefix_result = [0] * len(SCREAMING_SNAKE_CASE_)
    for i in range(1, len(SCREAMING_SNAKE_CASE_)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and SCREAMING_SNAKE_CASE_[i] != SCREAMING_SNAKE_CASE_[j]:
            j = prefix_result[j - 1]
        if SCREAMING_SNAKE_CASE_[i] == SCREAMING_SNAKE_CASE_[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    """Return the maximum value of the string's prefix function, i.e. the
    length of the longest prefix that reappears as a suffix of some prefix.

    Returns 0 for the empty string (the original ``max(...)`` raised
    ValueError on empty input).
    """
    # The prefix array is computed inline because `prefix_function` is not
    # defined under that name in this module.
    longest = 0
    prefix_result = [0] * len(SCREAMING_SNAKE_CASE_)
    for i in range(1, len(SCREAMING_SNAKE_CASE_)):
        j = prefix_result[i - 1]
        while j > 0 and SCREAMING_SNAKE_CASE_[i] != SCREAMING_SNAKE_CASE_[j]:
            j = prefix_result[j - 1]
        if SCREAMING_SNAKE_CASE_[i] == SCREAMING_SNAKE_CASE_[j]:
            j += 1
        prefix_result[i] = j
        longest = max(longest, j)
    return longest
if __name__ == "__main__":
    import doctest

    # Run the module's doctests when executed as a script.
    doctest.testmod()
| 714
|
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
# Module-level logger.
lowercase_ = logging.get_logger(__name__)
# File names under which the tokenizer's assets are serialized.
lowercase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# Download URLs of the vocabulary/merges/tokenizer files per checkpoint.
# NOTE(review): all constants in this section share the name `lowercase_`, so
# each assignment clobbers the previous one — a mechanical-renaming artifact.
lowercase_ = {
    """vocab_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
    },
    """merges_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
    },
    """tokenizer_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
    },
}
# Maximum sequence length of each pretrained checkpoint.
lowercase_ = {
    """allenai/led-base-16384""": 1_6384,
}
class _snake_case ( lowercase__):
    """Fast (tokenizers-backed) LED tokenizer — a byte-level BPE tokenizer in
    the BART family that additionally pads ``global_attention_mask`` in
    ``_pad``.

    NOTE(review): several signatures below declare multiple parameters that all
    share the name ``__lowercase`` (a SyntaxError in Python), most locals are
    collapsed onto ``lowercase__``, and every public method is named ``A__`` so
    later definitions clobber earlier ones — all mechanical-renaming artifacts;
    the original names must be restored before this class can run.
    """

    # Class-level tokenizer metadata (file names, pretrained URLs, sizes).
    UpperCamelCase__ : int =VOCAB_FILES_NAMES
    UpperCamelCase__ : Any =PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase__ : Dict =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase__ : List[Any] =LEDTokenizer
    UpperCamelCase__ : Tuple =["""input_ids""", """attention_mask"""]

    def __init__( self : Optional[Any], __lowercase : Optional[Any]=None, __lowercase : Dict=None, __lowercase : Tuple=None, __lowercase : Union[str, Any]="replace", __lowercase : Tuple="<s>", __lowercase : Optional[Any]="</s>", __lowercase : Tuple="</s>", __lowercase : List[str]="<s>", __lowercase : Tuple="<unk>", __lowercase : Dict="<pad>", __lowercase : Dict="<mask>", __lowercase : Any=False, __lowercase : Any=True, **__lowercase : List[Any], ):
        """Build the backend tokenizer, then force its pre-tokenizer and
        post-processor to agree with ``add_prefix_space``/``trim_offsets``."""
        super().__init__(
            __lowercase, __lowercase, tokenizer_file=__lowercase, errors=__lowercase, bos_token=__lowercase, eos_token=__lowercase, sep_token=__lowercase, cls_token=__lowercase, unk_token=__lowercase, pad_token=__lowercase, mask_token=__lowercase, add_prefix_space=__lowercase, trim_offsets=__lowercase, **__lowercase, )
        # Rebuild the pre-tokenizer if its add_prefix_space flag disagrees.
        lowercase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space", __lowercase ) != add_prefix_space:
            lowercase__ = getattr(__lowercase, pre_tok_state.pop("type" ) )
            lowercase__ = add_prefix_space
            lowercase__ = pre_tok_class(**__lowercase )
        lowercase__ = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        lowercase__ = "post_processor"
        lowercase__ = getattr(self.backend_tokenizer, __lowercase, __lowercase )
        if tokenizer_component_instance:
            lowercase__ = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                lowercase__ = tuple(state["sep"] )
            if "cls" in state:
                lowercase__ = tuple(state["cls"] )
            lowercase__ = False
            if state.get("add_prefix_space", __lowercase ) != add_prefix_space:
                lowercase__ = add_prefix_space
                lowercase__ = True
            if state.get("trim_offsets", __lowercase ) != trim_offsets:
                lowercase__ = trim_offsets
                lowercase__ = True
            if changes_to_apply:
                # Re-instantiate the post-processor with the corrected state.
                lowercase__ = getattr(__lowercase, state.pop("type" ) )
                lowercase__ = component_class(**__lowercase )
                setattr(self.backend_tokenizer, __lowercase, __lowercase )

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def A__ ( self : str ):
        """Return the mask token as a string, or None (logging an error) if unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def A__ ( self : Optional[int], __lowercase : Dict ):
        # Wrap plain strings in AddedToken so the mask token keeps its
        # lstrip/rstrip behaviour.
        lowercase__ = AddedToken(__lowercase, lstrip=__lowercase, rstrip=__lowercase ) if isinstance(__lowercase, __lowercase ) else value
        lowercase__ = value

    def A__ ( self : Any, *__lowercase : List[Any], **__lowercase : Optional[Any] ):
        """Batched encoding; pretokenized input requires add_prefix_space=True."""
        lowercase__ = kwargs.get("is_split_into_words", __lowercase )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                "to use it with pretokenized inputs." )
        return super()._batch_encode_plus(*__lowercase, **__lowercase )

    def A__ ( self : int, *__lowercase : Union[str, Any], **__lowercase : List[str] ):
        """Single-example encoding; same pretokenized-input constraint as above."""
        lowercase__ = kwargs.get("is_split_into_words", __lowercase )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                "to use it with pretokenized inputs." )
        return super()._encode_plus(*__lowercase, **__lowercase )

    def A__ ( self : Optional[Any], __lowercase : str, __lowercase : Optional[str] = None ):
        """Save the backend tokenizer model files; returns the written names."""
        lowercase__ = self._tokenizer.model.save(__lowercase, name=__lowercase )
        return tuple(__lowercase )

    def A__ ( self : List[str], __lowercase : int, __lowercase : Optional[int]=None ):
        """Add special tokens: ``<s> A </s>`` (``<s> A </s> </s> B </s>`` for pairs)."""
        lowercase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def A__ ( self : int, __lowercase : List[int], __lowercase : Optional[List[int]] = None ):
        """Token type ids are all zeros for LED (no segment embeddings)."""
        lowercase__ = [self.sep_token_id]
        lowercase__ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def A__ ( self : Union[str, Any], __lowercase : Union[Dict[str, EncodedInput], BatchEncoding], __lowercase : Optional[int] = None, __lowercase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD, __lowercase : Optional[int] = None, __lowercase : Optional[bool] = None, ):
        """Pad as usual, then extend ``global_attention_mask`` (with -1) to the
        padded length when it is present in the inputs."""
        lowercase__ = super()._pad(
            encoded_inputs=__lowercase, max_length=__lowercase, padding_strategy=__lowercase, pad_to_multiple_of=__lowercase, return_attention_mask=__lowercase, )
        # Load from model defaults
        if return_attention_mask is None:
            lowercase__ = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            lowercase__ = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            lowercase__ = len(encoded_inputs["global_attention_mask"] ) != len(__lowercase )
            if needs_to_be_padded:
                lowercase__ = len(__lowercase ) - len(encoded_inputs["global_attention_mask"] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    lowercase__ = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    lowercase__ = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
        return encoded_inputs
| 37
| 0
|
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class _snake_case ( lowercase__):
    """Dataset of token-id sequences used for LM distillation.

    On construction it validates the corpus, splits sequences that exceed the
    model's maximum input size, drops very short sequences and sequences that
    are mostly unknown tokens, and provides a padding collate function.

    NOTE(review): parameter lists below reuse the single name ``__lowercase``
    (a SyntaxError) and locals are collapsed onto ``lowercase__``; names such
    as ``params``, ``data`` and ``index`` are unbound, and the cleaning methods
    called in ``__init__`` are all defined as ``A__`` — mechanical-renaming
    artifacts that must be undone before this class can run.
    """

    def __init__( self : List[str], __lowercase : Union[str, Any], __lowercase : Dict ):
        # Store the experiment params and the corpus, then clean it in place.
        lowercase__ = params
        lowercase__ = np.array(__lowercase )
        lowercase__ = np.array([len(__lowercase ) for t in data] )
        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__( self : str, __lowercase : List[str] ):
        # One item is the pair (token ids, sequence length).
        return (self.token_ids[index], self.lengths[index])

    def __len__( self : str ):
        return len(self.lengths )

    def A__ ( self : Optional[Any] ):
        """Consistency check: one length per sequence, each matching its tokens."""
        assert len(self.token_ids ) == len(self.lengths )
        assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )

    def A__ ( self : Union[str, Any] ):
        """Split sequences longer than ``max_model_input_size`` into chunks,
        re-wrapping each chunk with the cls/sep (MLM) or bos/eos (CLM) ids."""
        lowercase__ = self.params.max_model_input_size
        lowercase__ = self.lengths > max_len
        logger.info(F'''Splitting {sum(__lowercase )} too long sequences.''' )

        def divide_chunks(__lowercase : Any, __lowercase : List[Any] ):
            # Yield successive fixed-size chunks of the sequence.
            return [l[i : i + n] for i in range(0, len(__lowercase ), __lowercase )]

        lowercase__ = []
        lowercase__ = []
        if self.params.mlm:
            lowercase__ , lowercase__ = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            lowercase__ , lowercase__ = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]
        for seq_, len_ in zip(self.token_ids, self.lengths ):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_ )
                new_lengths.append(len_ )
            else:
                lowercase__ = []
                for sub_s in divide_chunks(seq_, max_len - 2 ):
                    # Ensure every chunk starts/ends with the sentinel ids.
                    if sub_s[0] != cls_id:
                        lowercase__ = np.insert(__lowercase, 0, __lowercase )
                    if sub_s[-1] != sep_id:
                        lowercase__ = np.insert(__lowercase, len(__lowercase ), __lowercase )
                    assert len(__lowercase ) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(__lowercase )
                new_tok_ids.extend(__lowercase )
                new_lengths.extend([len(__lowercase ) for l in sub_seqs] )
        lowercase__ = np.array(__lowercase )
        lowercase__ = np.array(__lowercase )

    def A__ ( self : Tuple ):
        """Drop sequences of 11 tokens or fewer (too short to be useful)."""
        lowercase__ = len(self )
        lowercase__ = self.lengths > 11
        lowercase__ = self.token_ids[indices]
        lowercase__ = self.lengths[indices]
        lowercase__ = len(self )
        logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' )

    def A__ ( self : str ):
        """Drop sequences in which 50% or more of the tokens are unknown."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            lowercase__ = self.params.special_tok_ids["unk_token"]
            lowercase__ = len(self )
            lowercase__ = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
            lowercase__ = (unk_occs / self.lengths) < 0.5
            lowercase__ = self.token_ids[indices]
            lowercase__ = self.lengths[indices]
            lowercase__ = len(self )
            logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' )

    def A__ ( self : Tuple ):
        """Log corpus statistics (master process only)."""
        if not self.params.is_master:
            return
        logger.info(F'''{len(self )} sequences''' )
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def A__ ( self : List[Any], __lowercase : Optional[int] ):
        """Collate a batch: pad token ids to the batch max length and return
        (token_ids, lengths) tensors."""
        lowercase__ = [t[0] for t in batch]
        lowercase__ = [t[1] for t in batch]
        assert len(__lowercase ) == len(__lowercase )
        # Max for paddings
        lowercase__ = max(__lowercase )
        # Pad token ids
        if self.params.mlm:
            lowercase__ = self.params.special_tok_ids["pad_token"]
        else:
            lowercase__ = self.params.special_tok_ids["unk_token"]
        lowercase__ = [list(t.astype(__lowercase ) ) + [pad_idx] * (max_seq_len_ - len(__lowercase )) for t in token_ids]
        assert len(tk_ ) == len(__lowercase )
        assert all(len(__lowercase ) == max_seq_len_ for t in tk_ )
        lowercase__ = torch.tensor(tk_ )  # (bs, max_seq_len_)
        lowercase__ = torch.tensor(__lowercase )  # (bs)
        return tk_t, lg_t
| 715
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __lowerCAmelCase ( ):
    """Entry point of the `accelerate` CLI.

    Builds the top-level argument parser, registers every sub-command
    (config, env, launch, tpu, test), parses the command line and dispatches
    to the selected sub-command's handler.
    """
    # allow_abbrev=False: sub-commands define long flags that must not be
    # abbreviated. (Fixes the original, which passed an undefined name here.)
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False )
    subparsers = parser.add_subparsers(help="accelerate command helpers" )
    # Register commands
    get_config_parser(subparsers=subparsers )
    env_command_parser(subparsers=subparsers )
    launch_command_parser(subparsers=subparsers )
    tpu_command_parser(subparsers=subparsers )
    test_command_parser(subparsers=subparsers )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func" ):
        # No sub-command was given: show usage and exit with an error code.
        parser.print_help()
        exit(1 )
    # Run
    args.func(args )


# Alias so the `__main__` guard below (which calls `main()`) resolves.
main = __lowerCAmelCase
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this module — the entry point
    # above is named `__lowerCAmelCase`; confirm the intended entry-point name.
    main()
| 37
| 0
|
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
# Module-level logger.
lowercase_ = logging.get_logger(__name__)
# File names under which the tokenizer's assets are serialized.
lowercase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# Download URLs of the vocabulary/merges/tokenizer files per checkpoint.
# NOTE(review): all constants in this section share the name `lowercase_`, so
# each assignment clobbers the previous one — a mechanical-renaming artifact.
lowercase_ = {
    """vocab_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
    },
    """merges_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
    },
    """tokenizer_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
    },
}
# Maximum sequence length of each pretrained checkpoint.
lowercase_ = {
    """allenai/led-base-16384""": 1_6384,
}
class _snake_case ( lowercase__):
    """Fast (tokenizers-backed) LED tokenizer — a byte-level BPE tokenizer in
    the BART family that additionally pads ``global_attention_mask`` in
    ``_pad``.

    NOTE(review): several signatures below declare multiple parameters that all
    share the name ``__lowercase`` (a SyntaxError in Python), most locals are
    collapsed onto ``lowercase__``, and every public method is named ``A__`` so
    later definitions clobber earlier ones — all mechanical-renaming artifacts;
    the original names must be restored before this class can run.
    """

    # Class-level tokenizer metadata (file names, pretrained URLs, sizes).
    UpperCamelCase__ : int =VOCAB_FILES_NAMES
    UpperCamelCase__ : Any =PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase__ : Dict =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase__ : List[Any] =LEDTokenizer
    UpperCamelCase__ : Tuple =["""input_ids""", """attention_mask"""]

    def __init__( self : Optional[Any], __lowercase : Optional[Any]=None, __lowercase : Dict=None, __lowercase : Tuple=None, __lowercase : Union[str, Any]="replace", __lowercase : Tuple="<s>", __lowercase : Optional[Any]="</s>", __lowercase : Tuple="</s>", __lowercase : List[str]="<s>", __lowercase : Tuple="<unk>", __lowercase : Dict="<pad>", __lowercase : Dict="<mask>", __lowercase : Any=False, __lowercase : Any=True, **__lowercase : List[Any], ):
        """Build the backend tokenizer, then force its pre-tokenizer and
        post-processor to agree with ``add_prefix_space``/``trim_offsets``."""
        super().__init__(
            __lowercase, __lowercase, tokenizer_file=__lowercase, errors=__lowercase, bos_token=__lowercase, eos_token=__lowercase, sep_token=__lowercase, cls_token=__lowercase, unk_token=__lowercase, pad_token=__lowercase, mask_token=__lowercase, add_prefix_space=__lowercase, trim_offsets=__lowercase, **__lowercase, )
        # Rebuild the pre-tokenizer if its add_prefix_space flag disagrees.
        lowercase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space", __lowercase ) != add_prefix_space:
            lowercase__ = getattr(__lowercase, pre_tok_state.pop("type" ) )
            lowercase__ = add_prefix_space
            lowercase__ = pre_tok_class(**__lowercase )
        lowercase__ = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        lowercase__ = "post_processor"
        lowercase__ = getattr(self.backend_tokenizer, __lowercase, __lowercase )
        if tokenizer_component_instance:
            lowercase__ = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                lowercase__ = tuple(state["sep"] )
            if "cls" in state:
                lowercase__ = tuple(state["cls"] )
            lowercase__ = False
            if state.get("add_prefix_space", __lowercase ) != add_prefix_space:
                lowercase__ = add_prefix_space
                lowercase__ = True
            if state.get("trim_offsets", __lowercase ) != trim_offsets:
                lowercase__ = trim_offsets
                lowercase__ = True
            if changes_to_apply:
                # Re-instantiate the post-processor with the corrected state.
                lowercase__ = getattr(__lowercase, state.pop("type" ) )
                lowercase__ = component_class(**__lowercase )
                setattr(self.backend_tokenizer, __lowercase, __lowercase )

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def A__ ( self : str ):
        """Return the mask token as a string, or None (logging an error) if unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def A__ ( self : Optional[int], __lowercase : Dict ):
        # Wrap plain strings in AddedToken so the mask token keeps its
        # lstrip/rstrip behaviour.
        lowercase__ = AddedToken(__lowercase, lstrip=__lowercase, rstrip=__lowercase ) if isinstance(__lowercase, __lowercase ) else value
        lowercase__ = value

    def A__ ( self : Any, *__lowercase : List[Any], **__lowercase : Optional[Any] ):
        """Batched encoding; pretokenized input requires add_prefix_space=True."""
        lowercase__ = kwargs.get("is_split_into_words", __lowercase )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                "to use it with pretokenized inputs." )
        return super()._batch_encode_plus(*__lowercase, **__lowercase )

    def A__ ( self : int, *__lowercase : Union[str, Any], **__lowercase : List[str] ):
        """Single-example encoding; same pretokenized-input constraint as above."""
        lowercase__ = kwargs.get("is_split_into_words", __lowercase )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                "to use it with pretokenized inputs." )
        return super()._encode_plus(*__lowercase, **__lowercase )

    def A__ ( self : Optional[Any], __lowercase : str, __lowercase : Optional[str] = None ):
        """Save the backend tokenizer model files; returns the written names."""
        lowercase__ = self._tokenizer.model.save(__lowercase, name=__lowercase )
        return tuple(__lowercase )

    def A__ ( self : List[str], __lowercase : int, __lowercase : Optional[int]=None ):
        """Add special tokens: ``<s> A </s>`` (``<s> A </s> </s> B </s>`` for pairs)."""
        lowercase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def A__ ( self : int, __lowercase : List[int], __lowercase : Optional[List[int]] = None ):
        """Token type ids are all zeros for LED (no segment embeddings)."""
        lowercase__ = [self.sep_token_id]
        lowercase__ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def A__ ( self : Union[str, Any], __lowercase : Union[Dict[str, EncodedInput], BatchEncoding], __lowercase : Optional[int] = None, __lowercase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD, __lowercase : Optional[int] = None, __lowercase : Optional[bool] = None, ):
        """Pad as usual, then extend ``global_attention_mask`` (with -1) to the
        padded length when it is present in the inputs."""
        lowercase__ = super()._pad(
            encoded_inputs=__lowercase, max_length=__lowercase, padding_strategy=__lowercase, pad_to_multiple_of=__lowercase, return_attention_mask=__lowercase, )
        # Load from model defaults
        if return_attention_mask is None:
            lowercase__ = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            lowercase__ = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            lowercase__ = len(encoded_inputs["global_attention_mask"] ) != len(__lowercase )
            if needs_to_be_padded:
                lowercase__ = len(__lowercase ) - len(encoded_inputs["global_attention_mask"] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    lowercase__ = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    lowercase__ = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
        return encoded_inputs
| 716
|
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class _snake_case ( unittest.TestCase):
    """Test helper holding the configuration used to build a DonutImageProcessor.

    Fixes the original ``__init__``, whose parameters all shared the name
    ``__lowercase`` (a SyntaxError) and whose attribute assignments were
    collapsed; names are recovered from the right-hand sides of the original
    body. Mutable list defaults are replaced by ``None`` sentinels with the
    same effective values.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        # `parent` is the test case that instantiated this helper.
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def A__ ( self ):
        """Return the image-processor kwargs described by this configuration."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class _snake_case ( lowercase__ , unittest.TestCase):
    """Donut image-processor test suite (obfuscated copy).

    NOTE(review): the first base class ``lowercase__`` is not defined at module
    scope (presumably ``ImageProcessingSavingTestMixin`` — confirm), and
    ``DonutImageProcessingTester`` used in ``setUp`` has no visible definition,
    so this class cannot run as written.  All methods share the name ``A__``,
    so only the last definition of each overload actually survives on the
    class — a side effect of the obfuscation, not intent.
    """

    # Image-processor class under test; None when vision deps are unavailable.
    UpperCamelCase__ : Optional[int] =DonutImageProcessor if is_vision_available() else None

    # setUp — NOTE(review): the tester is bound to a throwaway local instead of
    # ``self.image_processor_tester``, which the property below reads.
    def A__ ( self : str ):
        lowercase__ = DonutImageProcessingTester(self )

    # Kwargs dict consumed when instantiating the processor in each test.
    @property
    def A__ ( self : List[str] ):
        return self.image_processor_tester.prepare_image_processor_dict()

    # Checks the processor exposes every expected config attribute.
    # NOTE(review): ``__lowercase`` below is unbound in this scope; the
    # constructed processor was originally assigned to that name.
    def A__ ( self : Optional[Any] ):
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__lowercase, "do_resize" ) )
        self.assertTrue(hasattr(__lowercase, "size" ) )
        self.assertTrue(hasattr(__lowercase, "do_thumbnail" ) )
        self.assertTrue(hasattr(__lowercase, "do_align_long_axis" ) )
        self.assertTrue(hasattr(__lowercase, "do_pad" ) )
        self.assertTrue(hasattr(__lowercase, "do_normalize" ) )
        self.assertTrue(hasattr(__lowercase, "image_mean" ) )
        self.assertTrue(hasattr(__lowercase, "image_std" ) )

    # ``from_dict`` round-trip: ints become square sizes, (w, h) tuples are
    # reinterpreted as {"height": h, "width": w}.
    def A__ ( self : str ):
        lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size, {"height": 18, "width": 20} )
        lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=42 )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42} )
        # Previous config had dimensions in (width, height) order
        lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84) )
        self.assertEqual(image_processor.size, {"height": 84, "width": 42} )

    def A__ ( self : List[str] ):
        pass

    # PIL input: single image -> batch dim 1; list input -> full batch.
    @is_flaky()
    def A__ ( self : Dict ):
        # Initialize image_processing
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, Image.Image )
        # Test not batched input
        lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
        # Test batched
        lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )

    # Same contract as above but feeding numpy arrays.
    @is_flaky()
    def A__ ( self : Optional[Any] ):
        # Initialize image_processing
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, numpify=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, np.ndarray )
        # Test not batched input
        lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
        # Test batched
        lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )

    # Same contract as above but feeding torch tensors.
    @is_flaky()
    def A__ ( self : Tuple ):
        # Initialize image_processing
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, torchify=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, torch.Tensor )
        # Test not batched input
        lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
        # Test batched
        lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
| 37
| 0
|
from __future__ import annotations
def __lowerCAmelCase ( nums, target ):
    """Two-pointer two-sum over a *sorted* sequence.

    Returns ``[i, j]`` with ``i < j`` such that ``nums[i] + nums[j] == target``,
    or ``[]`` when no such pair exists.  ``nums`` must be sorted in
    non-decreasing order for the pointer moves below to be valid.

    The obfuscated original declared both parameters with the same name
    (a SyntaxError) and assigned the pointer updates to a dead local, so the
    loop variables were never defined; both defects are fixed here.
    """
    i = 0
    j = len(nums ) - 1
    while i < j:
        pair_sum = nums[i] + nums[j]
        if pair_sum == target:
            return [i, j]
        if pair_sum < target:
            i += 1  # sum too small -> advance the left pointer
        else:
            j -= 1  # sum too large -> retreat the right pointer
    return []
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # The obfuscated original printed an undefined name (`two_pointer`);
    # call the module's actual two-sum helper instead.
    print(F'{__lowerCAmelCase([2, 7, 11, 15], 9) = }')
| 717
|
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _snake_case ( lowercase__):
    """Repo-hygiene checks over ``./datasets/**/*.py`` (obfuscated copy).

    NOTE(review): the base class ``lowercase__`` is undefined at module scope
    (likely ``TestCase`` originally — confirm), and all four methods share the
    name ``A__``, so only the last definition survives on the class — an
    artifact of the obfuscation.
    """

    # Scan one file for ``open(...)`` calls missing an explicit encoding/mode.
    # Returns the first regex match, or None.
    def A__ ( self : Optional[Any], __lowercase : str ):
        with open(__lowercase, encoding="utf-8" ) as input_file:
            lowercase__ = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
            lowercase__ = input_file.read()
            lowercase__ = regexp.search(__lowercase )
            return match

    # Scan one file for bare ``print(`` calls; group(1) is None for prints that
    # only appear inside comments, strings, or docstrings.
    def A__ ( self : str, __lowercase : str ):
        with open(__lowercase, encoding="utf-8" ) as input_file:
            lowercase__ = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL )
            lowercase__ = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            lowercase__ = regexp.finditer(__lowercase )
            lowercase__ = [match for match in matches if match is not None and match.group(1 ) is not None]
            return matches[0] if matches else None

    # Fail the build if any dataset script opens a file without utf-8 encoding.
    def A__ ( self : Union[str, Any] ):
        lowercase__ = Path("./datasets" )
        lowercase__ = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(__lowercase ) ):
                raise AssertionError(F'''open(...) must use utf-8 encoding in {dataset}''' )

    # Fail the build if any dataset script uses print() instead of logging.
    def A__ ( self : Union[str, Any] ):
        lowercase__ = Path("./datasets" )
        lowercase__ = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(__lowercase ) ):
                raise AssertionError(F'''print statement found in {dataset}. Use datasets.logger/logging instead.''' )
| 37
| 0
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _snake_case ( lowercase__):
    """Karras-VE unconditional image-generation pipeline (obfuscated copy).

    Implements the stochastic sampler of Karras et al. (eq. 213 style inputs):
    noise-augmented Euler steps with an optional 2nd-order correction.

    NOTE(review): the base class ``lowercase__`` is undefined at module scope
    (presumably ``DiffusionPipeline`` — confirm), and the obfuscation bound
    several intermediates (``model``, ``sigma_hat``, ``sample_hat``,
    ``step_output``) to throwaway locals, so ``__call__`` cannot run as-is.
    """

    # Declared component types for the registered modules.
    UpperCamelCase__ : UNetaDModel
    UpperCamelCase__ : KarrasVeScheduler

    def __init__( self : Dict, __lowercase : UNetaDModel, __lowercase : KarrasVeScheduler ):
        super().__init__()
        self.register_modules(unet=__lowercase, scheduler=__lowercase )

    # Sampling loop: batch_size images of the UNet's configured sample size.
    @torch.no_grad()
    def __call__( self : Optional[Any], __lowercase : int = 1, __lowercase : int = 50, __lowercase : Optional[Union[torch.Generator, List[torch.Generator]]] = None, __lowercase : Optional[str] = "pil", __lowercase : bool = True, **__lowercase : int, ):
        lowercase__ = self.unet.config.sample_size
        lowercase__ = (batch_size, 3, img_size, img_size)
        lowercase__ = self.unet
        # sample x_0 ~ N(0, sigma_0^2 * I)
        lowercase__ = randn_tensor(__lowercase, generator=__lowercase, device=self.device ) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(__lowercase )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # here sigma_t == t_i from the paper
            lowercase__ = self.scheduler.schedule[t]
            lowercase__ = self.scheduler.schedule[t - 1] if t > 0 else 0
            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            # NOTE(review): both unpack targets are the same dead local, so
            # ``sigma_hat``/``sample_hat`` used below are unbound.
            lowercase__ , lowercase__ = self.scheduler.add_noise_to_input(__lowercase, __lowercase, generator=__lowercase )
            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            lowercase__ = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2 ).sample
            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            lowercase__ = self.scheduler.step(__lowercase, __lowercase, __lowercase, __lowercase )
            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                lowercase__ = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2 ).sample
                lowercase__ = self.scheduler.step_correct(
                    __lowercase, __lowercase, __lowercase, __lowercase, step_output.prev_sample, step_output["derivative"], )
            lowercase__ = step_output.prev_sample
        # Map from [-1, 1] to [0, 1] and move channels last for numpy/PIL.
        lowercase__ = (sample / 2 + 0.5).clamp(0, 1 )
        lowercase__ = sample.cpu().permute(0, 2, 3, 1 ).numpy()
        if output_type == "pil":
            lowercase__ = self.numpy_to_pil(__lowercase )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=__lowercase )
| 718
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Map each submodule of the XMod package to the public names it defines.
# `_LazyModule` uses this table to import a submodule only on first access.
# The obfuscated original bound the table to `lowercase_`, overwrote that
# binding with the modeling list, and then passed an undefined
# `_import_structure` to `_LazyModule` — the canonical pattern is restored.
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is missing: leave the modeling symbols out of the lazy table.
    pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so the heavy torch
    # imports above only happen when one of the names is actually touched.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 37
| 0
|
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class _snake_case ( lowercase__ , unittest.TestCase):
    """Wav2Vec2 phoneme CTC tokenizer test suite (obfuscated copy).

    NOTE(review): the first base class ``lowercase__`` is undefined at module
    scope (presumably ``TokenizerTesterMixin`` — confirm).  Nearly every
    method is named ``A__``, so later definitions shadow earlier ones — an
    artifact of the obfuscation; method bodies are kept byte-identical.
    """

    # Tokenizer class under test; second flag presumably "test_rust_tokenizer".
    UpperCamelCase__ : Optional[Any] =WavaVecaPhonemeCTCTokenizer
    UpperCamelCase__ : List[Any] =False

    # setUp: write a phoneme vocab file into the test tmp dir.
    # NOTE(review): every intermediate is bound to the same dead local, so the
    # names read later (``vocab_tokens`` via ``__lowercase`` etc.) are unbound.
    def A__ ( self : str ):
        super().setUp()
        lowercase__ = (
            "<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː "
            "ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː "
            "ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 "
            "oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ "
            "pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ "
            "yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ "
            "əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ "
            "ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ "
            "ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ "
            "uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ "
            "ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ "
            "ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ "
            "ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"
        ).split(" " )
        lowercase__ = dict(zip(__lowercase, range(len(__lowercase ) ) ) )
        lowercase__ = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}
        lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
            fp.write(json.dumps(__lowercase ) + "\n" )

    # Build a (text, ids) pair whose tokens round-trip through encode/decode.
    # NOTE(review): duplicate ``__lowercase`` parameter names are a
    # SyntaxError; kept byte-identical as this is a documentation-only pass.
    def A__ ( self : Optional[Any], __lowercase : Union[str, Any], __lowercase : Optional[int]=False, __lowercase : Optional[int]=20, __lowercase : str=5 ):
        lowercase__ = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=__lowercase )) for i in range(len(__lowercase ) )]
        lowercase__ = list(filter(lambda __lowercase : [t[0]] == tokenizer.encode(t[1], do_phonemize=__lowercase ), __lowercase ) )
        if max_length is not None and len(__lowercase ) > max_length:
            lowercase__ = toks[:max_length]
        if min_length is not None and len(__lowercase ) < min_length and len(__lowercase ) > 0:
            while len(__lowercase ) < min_length:
                lowercase__ = toks + toks
        # toks_str = [t[1] for t in toks]
        lowercase__ = [t[0] for t in toks]
        # Ensure consistency
        lowercase__ = tokenizer.decode(__lowercase, clean_up_tokenization_spaces=__lowercase )
        if " " not in output_txt and len(__lowercase ) > 1:
            lowercase__ = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=__lowercase )
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=__lowercase )
            )
        if with_prefix_space:
            lowercase__ = " " + output_txt
        lowercase__ = tokenizer.encode(__lowercase, add_special_tokens=__lowercase )
        return output_txt, output_ids

    # Factory: load the tokenizer from the tmp dir with the special-token map.
    def A__ ( self : Dict, **__lowercase : Optional[int] ):
        kwargs.update(self.special_tokens_map )
        return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **__lowercase )

    # Newly added tokens must be appended at the end of the vocab.
    def A__ ( self : Dict ):
        lowercase__ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        # check adding a single token
        tokenizer.add_tokens("xxx" )
        lowercase__ = tokenizer("m xxx ɪ", do_phonemize=__lowercase ).input_ids
        self.assertEqual(__lowercase, [13, 392, 17] )  # xxx should be last token
        tokenizer.add_tokens(["aaa", "bbb", "ccc"] )
        lowercase__ = tokenizer("m aaa ɪ ccc", do_phonemize=__lowercase ).input_ids
        self.assertEqual(__lowercase, [13, 393, 17, 395] )  # aaa and ccc should be after xxx and 2 after aaa
        lowercase__ = tokenizer("maɪ c", do_phonemize=__lowercase ).input_ids
        self.assertEqual(__lowercase, [3, 200] )  # mai should be <unk> (=3)

    # phonemize() output for en-us.
    def A__ ( self : Union[str, Any] ):
        lowercase__ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        lowercase__ = "Hello how are you"
        lowercase__ = tokenizer.phonemize(__lowercase, phonemizer_lang="en-us" )
        self.assertEqual(__lowercase, "h ə l oʊ h aʊ ɑːɹ j uː" )

    # Encoding raw text equals encoding its phonemized form.
    def A__ ( self : List[str] ):
        lowercase__ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        lowercase__ = "Hello how are you"
        lowercase__ = tokenizer.phonemize(__lowercase, phonemizer_lang="en-us" )
        self.assertEqual(tokenizer(__lowercase ).input_ids, tokenizer(__lowercase, do_phonemize=__lowercase ).input_ids )

    # encode -> decode round-trips back to the phoneme string.
    def A__ ( self : str ):
        lowercase__ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        lowercase__ = "Hello how are you"
        lowercase__ = tokenizer.phonemize(__lowercase, phonemizer_lang="en-us" )
        lowercase__ = tokenizer.decode(tokenizer(__lowercase ).input_ids )
        self.assertEqual(__lowercase, __lowercase )

    # decode vs batch_decode: CTC de-duplication and <pad> removal.
    def A__ ( self : Any ):
        lowercase__ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        lowercase__ = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
            [24, 22, 5, 24, 22, 5, 77],
        ]
        lowercase__ = tokenizer.decode(sample_ids[0] )
        lowercase__ = tokenizer.batch_decode(__lowercase )
        self.assertEqual(__lowercase, batch_tokens[0] )
        self.assertEqual(__lowercase, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )

    # phonemize() with an explicit word delimiter token.
    def A__ ( self : str ):
        lowercase__ = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        lowercase__ = "Hello how are you"
        lowercase__ = tokenizer.phonemize(__lowercase, phonemizer_lang="en-us" )
        self.assertEqual(__lowercase, "h ə l oʊ | h aʊ | ɑːɹ | j uː |" )

    # Raw-text vs pre-phonemized encoding equality with a word delimiter.
    def A__ ( self : Union[str, Any] ):
        lowercase__ = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        lowercase__ = "Hello how are you"
        lowercase__ = tokenizer.phonemize(__lowercase, phonemizer_lang="en-us" )
        self.assertEqual(tokenizer(__lowercase ).input_ids, tokenizer(__lowercase, do_phonemize=__lowercase ).input_ids )

    # Decoding with and without filtering of the word delimiter token.
    def A__ ( self : Tuple ):
        lowercase__ = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        # fmt: off
        lowercase__ = [
            [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
            [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
        ]
        # fmt: on
        # decode with word_del_token filter
        lowercase__ = tokenizer.decode(sample_ids[0] )
        lowercase__ = tokenizer.batch_decode(__lowercase )
        self.assertEqual(__lowercase, batch_tokens[0] )
        self.assertEqual(__lowercase, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )
        # decode with no word_del_token filter
        lowercase__ = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=__lowercase )
        lowercase__ = tokenizer.batch_decode(__lowercase, filter_word_delimiter_token=__lowercase )
        self.assertEqual(__lowercase, batch_tokens[0] )
        self.assertEqual(__lowercase, ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"] )

    # Round-trip with delimiter filtering enabled.
    def A__ ( self : int ):
        lowercase__ = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        lowercase__ = "Hello how are you"
        lowercase__ = tokenizer.phonemize(__lowercase, phonemizer_lang="en-us" )
        lowercase__ = tokenizer.decode(tokenizer(__lowercase ).input_ids, filter_word_delimiter_token=__lowercase )
        self.assertEqual(__lowercase, __lowercase )

    # Round-trip with delimiter filtering disabled.
    def A__ ( self : Optional[int] ):
        lowercase__ = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        lowercase__ = "Hello how are you"
        lowercase__ = tokenizer.phonemize(__lowercase, phonemizer_lang="en-us" )
        lowercase__ = tokenizer.decode(tokenizer(__lowercase ).input_ids, filter_word_delimiter_token=__lowercase )
        self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |" )] ).strip(), __lowercase )

    # Different phonemizer languages produce different ids and decodings.
    def A__ ( self : Optional[int] ):
        lowercase__ = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token=__lowercase )
        lowercase__ = "Hello how are you"
        lowercase__ = tokenizer(__lowercase, phonemizer_lang="en-us" ).input_ids
        lowercase__ = tokenizer(__lowercase, phonemizer_lang="fr-fr" ).input_ids
        self.assertNotEqual(__lowercase, __lowercase )
        lowercase__ = tokenizer.decode(__lowercase )
        lowercase__ = tokenizer.decode(__lowercase )
        self.assertEqual(__lowercase, "h ə l oʊ h aʊ ɑːɹ j uː" )
        self.assertEqual(__lowercase, "ɛ l o h aʊ a ʁ j u" )

    # Case is irrelevant: phonemization lowercases input.
    def A__ ( self : List[Any] ):
        lowercase__ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        lowercase__ = "Hello how Are you"
        lowercase__ = "hello how are you"
        lowercase__ = tokenizer(__lowercase ).input_ids
        lowercase__ = tokenizer(__lowercase ).input_ids
        self.assertEqual(__lowercase, __lowercase )

    # Added tokens and special tokens survive batch decoding.
    def A__ ( self : Dict ):
        lowercase__ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        tokenizer.add_tokens(["!", "?"] )
        tokenizer.add_special_tokens({"cls_token": "$$$"} )
        # fmt: off
        lowercase__ = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
            [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
        ]
        # fmt: on
        lowercase__ = tokenizer.batch_decode(__lowercase )
        self.assertEqual(__lowercase, ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"] )

    # Pluck one key out of a list of offset dicts.
    @staticmethod
    def A__ ( __lowercase : Tuple, __lowercase : Any ):
        lowercase__ = [d[key] for d in offsets]
        return retrieved_list

    # Character offsets returned by decode(..., output_char_offsets=True).
    def A__ ( self : List[Any] ):
        lowercase__ = self.get_tokenizer(word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        lowercase__ = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on
        lowercase__ = tokenizer.decode(__lowercase, output_char_offsets=__lowercase, filter_word_delimiter_token=__lowercase )
        # check Wav2Vec2CTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys() ), 2 )
        self.assertTrue("text" in outputs )
        self.assertTrue("char_offsets" in outputs )
        self.assertTrue(isinstance(__lowercase, __lowercase ) )
        # check that order of chars is correct and identical for both outputs
        self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"], "char" ) ), outputs.text )
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "char" ), ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"] )
        # check that offsets are actually correct for char
        # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
        # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "start_offset" ), [0, 1, 4, 7, 9, 11, 12, 15, 16] )
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "end_offset" ), [1, 4, 6, 9, 10, 12, 15, 16, 17] )

    # batch_decode offsets must agree with per-sample decode offsets.
    def A__ ( self : List[Any] ):
        lowercase__ = self.get_tokenizer(word_delimiter_token="|" )
        def check_list_tuples_equal(__lowercase : Optional[int], __lowercase : Optional[int] ):
            self.assertTrue(isinstance(__lowercase, __lowercase ) )
            self.assertTrue(isinstance(outputs_list[0], __lowercase ) )
            # transform list to ModelOutput
            lowercase__ = WavaVecaPhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
            self.assertListEqual(outputs_batch["text"], outputs_batch_a["text"] )
            def recursive_check(__lowercase : Dict, __lowercase : Tuple ):
                if isinstance(__lowercase, __lowercase ):
                    [recursive_check(__lowercase, __lowercase ) for la, la in zip(__lowercase, __lowercase )]
                self.assertEqual(__lowercase, __lowercase )
            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch["char_offsets"], outputs_batch_a["char_offsets"] )
        # fmt: off
        lowercase__ = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
            [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
        ]
        # fmt: on
        # We assume that `decode` works as expected. All we will check now is
        # the output type is correct and the output is identical to `decode`
        # char
        lowercase__ = tokenizer.batch_decode(__lowercase, output_char_offsets=__lowercase )
        lowercase__ = [tokenizer.decode(__lowercase, output_char_offsets=__lowercase ) for ids in sample_ids]
        check_list_tuples_equal(__lowercase, __lowercase )

    @unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes" )
    def A__ ( self : Optional[int] ):
        pass

    @unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes" )
    def A__ ( self : Optional[int] ):
        pass

    @unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency" )
    def A__ ( self : Any ):
        pass

    @unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing" )
    def A__ ( self : Dict ):
        pass

    # add_tokens / add_special_tokens grow the vocab and encode correctly.
    def A__ ( self : Optional[Any] ):
        lowercase__ = self.get_tokenizers(do_lower_case=__lowercase )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                lowercase__ = tokenizer.vocab_size
                lowercase__ = len(__lowercase )
                self.assertNotEqual(__lowercase, 0 )
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                lowercase__ = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                lowercase__ = tokenizer.add_tokens(__lowercase )
                lowercase__ = tokenizer.vocab_size
                lowercase__ = len(__lowercase )
                self.assertNotEqual(__lowercase, 0 )
                self.assertEqual(__lowercase, __lowercase )
                self.assertEqual(__lowercase, len(__lowercase ) )
                self.assertEqual(__lowercase, all_size + len(__lowercase ) )
                lowercase__ = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=__lowercase )
                self.assertGreaterEqual(len(__lowercase ), 4 )
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1 )
                lowercase__ = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                lowercase__ = tokenizer.add_special_tokens(__lowercase )
                lowercase__ = tokenizer.vocab_size
                lowercase__ = len(__lowercase )
                self.assertNotEqual(__lowercase, 0 )
                self.assertEqual(__lowercase, __lowercase )
                self.assertEqual(__lowercase, len(__lowercase ) )
                self.assertEqual(__lowercase, all_size_a + len(__lowercase ) )
                lowercase__ = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=__lowercase )
                self.assertGreaterEqual(len(__lowercase ), 6 )
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[0], tokens[1] )
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3], tokens[-4] )
                self.assertEqual(tokens[0], tokenizer.eos_token_id )
                self.assertEqual(tokens[-3], tokenizer.pad_token_id )

    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
    def A__ ( self : List[Any] ):
        pass

    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
    def A__ ( self : str ):
        pass

    # convert_tokens_to_string returns a dict with a "text" entry here.
    def A__ ( self : int ):
        # The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
        # is not the case for Wav2Vec2PhonemeCTCTokenizer.
        lowercase__ = self.get_tokenizers(fast=__lowercase, do_lower_case=__lowercase )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                lowercase__ = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
                lowercase__ = tokenizer.convert_tokens_to_string(__lowercase )
                self.assertIsInstance(output["text"], __lowercase )
| 719
|
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowercase_ = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class _snake_case ( unittest.TestCase):
    """Config holder for the Pix2Struct image-processor tests below.

    NOTE(review): the obfuscated original used duplicate ``__lowercase``
    parameter names (a SyntaxError), stored every value in a throwaway local
    instead of on ``self``, renamed both helper methods to ``A__`` even though
    the sibling test class calls ``prepare_image_processor_dict()`` and
    ``prepare_dummy_image()``, and passed an undefined name as ``stream=``.
    All defects are repaired here; ``A__`` is kept as an alias for the last
    (surviving) obfuscated definition.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        self.size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        # Patch budgets exercised by the tests below.
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        """Return kwargs for constructing a ``Pix2StructImageProcessor``."""
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        """Download and return one sample image as RGB (requires network)."""
        dataset_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(dataset_url, stream=True ).raw ).convert("RGB" )
        return raw_image

    # Backward-compatible alias: the obfuscated name resolved to the *last*
    # ``A__`` definition, i.e. the dummy-image helper.
    A__ = prepare_dummy_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class _snake_case ( lowercase__ , unittest.TestCase):
UpperCamelCase__ : Any =PixaStructImageProcessor if is_vision_available() else None
def A__ ( self : Any ):
lowercase__ = PixaStructImageProcessingTester(self )
@property
def A__ ( self : Union[str, Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def A__ ( self : Optional[Any] ):
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowercase, "do_normalize" ) )
self.assertTrue(hasattr(__lowercase, "do_convert_rgb" ) )
def A__ ( self : Optional[int] ):
lowercase__ = self.image_processor_tester.prepare_dummy_image()
lowercase__ = self.image_processing_class(**self.image_processor_dict )
lowercase__ = 2048
lowercase__ = image_processor(__lowercase, return_tensors="pt", max_patches=__lowercase )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606 ), atol=1e-3, rtol=1e-3 ) )
def A__ ( self : Union[str, Any] ):
# Initialize image_processor
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase, Image.Image )
# Test not batched input
lowercase__ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
lowercase__ = image_processor(
image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
lowercase__ = image_processor(
__lowercase, return_tensors="pt", max_patches=__lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
def A__ ( self : int ):
# Initialize image_processor
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase, Image.Image )
# Test not batched input
lowercase__ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
lowercase__ = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(__lowercase ):
lowercase__ = image_processor(
image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches
lowercase__ = "Hello"
lowercase__ = image_processor(
image_inputs[0], return_tensors="pt", max_patches=__lowercase, header_text=__lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
lowercase__ = image_processor(
__lowercase, return_tensors="pt", max_patches=__lowercase, header_text=__lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
    def A__ ( self : Tuple ):
        """numpy input path: flattened patches from ndarray images keep the
        expected (batch, max_patches, hidden_dim) shape.

        NOTE(review): assignment targets below were renamed to `lowercase__`
        by an automated rewrite while reads keep the original names
        (`image_inputs`, `encoded_images`, ...); identifiers need restoring.
        """
        # Initialize image_processor
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, numpify=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, np.ndarray )
        # hidden dim = patch pixels * channels, +2 for the (row, col) ids
        lowercase__ = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            lowercase__ = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim), )
            # Test batched
            lowercase__ = image_processor(
                __lowercase, return_tensors="pt", max_patches=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
    def A__ ( self : Any ):
        """PyTorch input path: flattened patches from torch.Tensor images keep
        the expected (batch, max_patches, hidden_dim) shape.

        NOTE(review): assignment targets below were renamed to `lowercase__`
        by an automated rewrite while reads keep the original names
        (`image_inputs`, `encoded_images`, ...); identifiers need restoring.
        """
        # Initialize image_processor
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, torchify=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, torch.Tensor )
        # Test not batched input
        # hidden dim = patch pixels * channels, +2 for the (row, col) ids
        lowercase__ = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            lowercase__ = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim), )
            # Test batched
            lowercase__ = image_processor(
                __lowercase, return_tensors="pt", max_patches=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class _snake_case ( lowercase__ , unittest.TestCase):
    """Image-processor tests for the 4-channel (e.g. RGBA) input variant.

    The tester is built with num_channels=4, and the expected hidden dim
    uses (num_channels - 1) because the processor converts to RGB first.

    NOTE(review): assignment targets in the bodies below were renamed to
    `lowercase__` by an automated rewrite while reads keep the original
    names (`image_inputs`, `encoded_images`, ...); identifiers need
    restoring before these tests can run.
    """

    # Processor class under test; None when torchvision/PIL is unavailable.
    UpperCamelCase__ : Optional[int] =PixaStructImageProcessor if is_vision_available() else None
    def A__ ( self : Optional[int] ):
        # Build a tester that produces 4-channel images; RGB conversion
        # should reduce the effective channel count to 3.
        lowercase__ = PixaStructImageProcessingTester(self, num_channels=4 )
        lowercase__ = 3
    @property
    def A__ ( self : Union[str, Any] ):
        """Keyword args used to instantiate the processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()
    def A__ ( self : Dict ):
        """The processor exposes its configuration attributes."""
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__lowercase, "do_normalize" ) )
        self.assertTrue(hasattr(__lowercase, "do_convert_rgb" ) )
    def A__ ( self : Union[str, Any] ):
        """4-channel PIL input: output hidden dim uses num_channels - 1."""
        # Initialize image_processor
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, Image.Image )
        # Test not batched input
        lowercase__ = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            lowercase__ = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim), )
            # Test batched
            lowercase__ = image_processor(
                __lowercase, return_tensors="pt", max_patches=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
| 37
| 0
|
'''simple docstring'''
import re
from filelock import FileLock
try:
    import nltk
    # Flag recording whether nltk could be imported.
    lowercase_ = True
except (ImportError, ModuleNotFoundError):
    lowercase_ = False
if NLTK_AVAILABLE:
    # Download the punkt sentence tokenizer once, guarded by a file lock so
    # concurrent workers don't race on the download.
    # NOTE(review): the flag is assigned to `lowercase_` above but read here
    # as NLTK_AVAILABLE — mangled identifier, needs restoring.
    with FileLock(""".lock""") as lock:
        nltk.download("""punkt""", quiet=True)
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    """Re-segment text so each sentence sits on its own line.

    Strips the Pegasus ``<n>`` newline marker, then splits the text with
    nltk's punkt sentence tokenizer and joins the sentences with newlines.

    :param SCREAMING_SNAKE_CASE_: input text (str)
    :return: newline-separated sentences (str)
    :raises AssertionError: when nltk is not installed
    """
    # Bug fix: str.sub returns a NEW string (strings are immutable); the
    # original call discarded the result, so "<n>" was never removed.
    SCREAMING_SNAKE_CASE_ = re.sub("<n>" , "" , SCREAMING_SNAKE_CASE_ )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(SCREAMING_SNAKE_CASE_ ) )
| 720
|
def __lowerCAmelCase ( grid , row , col , visit ):
    """Count simple paths from (row, col) to the bottom-right cell of `grid`.

    Moves one step up/down/left/right, never revisiting a cell already in
    `visit` and never entering cells whose value is 1 (walls).

    NOTE(review): the original def had four identically-named parameters
    (invalid syntax) and recursed through the undefined name
    `depth_first_search`; parameter names are restored from the reads in
    the body and the recursion now targets this function itself.

    :param grid: 2-D list of 0 (free) / 1 (blocked) cells
    :param row: current row index
    :param col: current column index
    :param visit: set of (row, col) cells already on the current path
    :return: number of distinct simple paths to the bottom-right corner
    """
    lowercase__ , lowercase__ = len(grid ), len(grid[0] )
    row_length, col_length = lowercase__, lowercase__
    # Out of bounds, already on the path, or a wall: dead end.
    if (
        min(row , col ) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    # Reached the target corner: exactly one path ends here.
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col) )
    count = 0
    count += __lowerCAmelCase(grid , row + 1 , col , visit )
    count += __lowerCAmelCase(grid , row - 1 , col , visit )
    count += __lowerCAmelCase(grid , row , col + 1 , visit )
    count += __lowerCAmelCase(grid , row , col - 1 , visit )
    # Backtrack so other branches may pass through this cell.
    visit.remove((row, col) )
    return count
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest
    doctest.testmod()
| 37
| 0
|
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
    """Return True when the number equals the sum of its proper divisors.

    A perfect number (e.g. 6, 28, 496) is one whose proper divisors
    (all divisors below the number itself) sum back to the number.

    Fixes: the body read the undefined name `number` instead of the
    parameter, and the annotation wrongly said `str` for an integer.

    :param SCREAMING_SNAKE_CASE_: positive integer to test
    :return: True iff the number is perfect
    """
    number = SCREAMING_SNAKE_CASE_
    # Proper divisors never exceed number // 2, so the scan can stop there.
    return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
    print("""Program to check whether a number is a Perfect number or not...""")
    lowercase_ = int(input("""Enter number: """).strip())
    # NOTE(review): the parsed input is assigned to `lowercase_` but read
    # below as `number`, and the predicate is called as `perfect` while the
    # function above is named `__lowerCAmelCase` — mangled identifiers that
    # need restoring before this script can run.
    print(F'{number} is {"" if perfect(number) else "not "}a Perfect Number.')
| 721
|
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    """Return True iff every character in the string is distinct.

    Keeps a big-int bitmap with one bit per Unicode code point; a repeated
    character is detected when its bit is already set.

    Fixes: the body iterated over the undefined name `input_str` instead of
    the parameter, and called `ord` on the whole string rather than on each
    character.

    :param SCREAMING_SNAKE_CASE_: string to check
    :return: True when no character repeats (empty string counts as True)
    """
    input_str = SCREAMING_SNAKE_CASE_
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch )
        ch_bit_index_on = pow(2 , ch_unicode )
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest
    doctest.testmod()
| 37
| 0
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
# Emit INFO-level progress logs during conversion; module-level logger.
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    """Build the DetrConfig (and an is-panoptic flag) for a DETR model name.

    NOTE(review): an automated rewrite renamed every assignment target to
    `lowercase__` while reads keep the original names (`model_name`,
    `is_panoptic`, `idalabel`, `config`), and the dict comprehension casts
    the parameter instead of the loop key — identifiers need restoring
    before this function is runnable.
    """
    # initialize config
    if "resnet-50" in model_name:
        lowercase__ = ResNetConfig.from_pretrained("microsoft/resnet-50" )
    elif "resnet-101" in model_name:
        lowercase__ = ResNetConfig.from_pretrained("microsoft/resnet-101" )
    else:
        raise ValueError("Model name should include either resnet50 or resnet101" )
    lowercase__ = DetrConfig(use_timm_backbone=SCREAMING_SNAKE_CASE_ , backbone_config=SCREAMING_SNAKE_CASE_ )
    # set label attributes
    lowercase__ = "panoptic" in model_name
    if is_panoptic:
        lowercase__ = 250
    else:
        lowercase__ = 91
    lowercase__ = "huggingface/label-files"
    lowercase__ = "coco-detection-id2label.json"
    lowercase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) )
    lowercase__ = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
    lowercase__ = idalabel
    lowercase__ = {v: k for k, v in idalabel.items()}
    return config, is_panoptic
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    """Build the (old_key, new_key) rename table mapping original DETR
    checkpoint names to HF Transformers names: backbone stem and stages,
    the 6 encoder/decoder transformer layers, then projection/query/head
    keys.

    NOTE(review): an automated rewrite renamed the list's assignment target
    to `lowercase__` while the appends read `rename_keys`, and the loops
    read `config` instead of the parameter — identifiers need restoring
    before this function is runnable.
    """
    # here we list all keys to be renamed (original name on the left, our name on the right)
    lowercase__ = []
    # stem
    # fmt: off
    rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
    rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
    rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
    rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
    rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
    # stages
    for stage_idx in range(len(config.backbone_config.depths ) ):
        for layer_idx in range(config.backbone_config.depths[stage_idx] ):
            # shortcut
            if layer_idx == 0:
                rename_keys.append(
                    (
                        f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
                        f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
                    ) )
                rename_keys.append(
                    (
                        f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
                        f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
                    ) )
                rename_keys.append(
                    (
                        f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
                        f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
                    ) )
                rename_keys.append(
                    (
                        f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
                        f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
                    ) )
                rename_keys.append(
                    (
                        f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
                        f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
                    ) )
            # 3 convs
            for i in range(3 ):
                rename_keys.append(
                    (
                        f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
                        f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
                    ) )
                rename_keys.append(
                    (
                        f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
                        f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
                    ) )
                rename_keys.append(
                    (
                        f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
                        f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
                    ) )
                rename_keys.append(
                    (
                        f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
                        f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
                    ) )
                rename_keys.append(
                    (
                        f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
                        f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
                    ) )
    # fmt: on
    for i in range(config.encoder_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (
                f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
                f'''encoder.layers.{i}.self_attn.out_proj.weight''',
            ) )
        rename_keys.append(
            (f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''') )
        rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight''') )
        rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias''') )
        rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight''') )
        rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias''') )
        rename_keys.append(
            (f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
        rename_keys.append(
            (f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
        rename_keys.append(
            (f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight''') )
        rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias''') )
        # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
        rename_keys.append(
            (
                f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
                f'''decoder.layers.{i}.self_attn.out_proj.weight''',
            ) )
        rename_keys.append(
            (f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''') )
        rename_keys.append(
            (
                f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
                f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
            ) )
        rename_keys.append(
            (
                f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
                f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
            ) )
        rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias''') )
        rename_keys.append(
            (f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
        rename_keys.append(
            (f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
        rename_keys.append(
            (f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
        rename_keys.append(
            (f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
        rename_keys.append(
            (f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight''') )
        rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias''') )
    # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
    rename_keys.extend(
        [
            ("input_proj.weight", "input_projection.weight"),
            ("input_proj.bias", "input_projection.bias"),
            ("query_embed.weight", "query_position_embeddings.weight"),
            ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
            ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
            ("class_embed.weight", "class_labels_classifier.weight"),
            ("class_embed.bias", "class_labels_classifier.bias"),
            ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
            ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
            ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
            ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
            ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
            ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ] )
    return rename_keys
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , old_key , new_key ):
    """Rename a state-dict entry in place: pop `old_key`, store under `new_key`.

    NOTE(review): the original def had three identically-named parameters
    (invalid syntax) and dropped the re-insertion; names restored from the
    call sites in the conversion routine below.

    :param SCREAMING_SNAKE_CASE_: the state dict (mutated in place)
    :param old_key: key to remove
    :param new_key: key under which the popped value is stored
    """
    lowercase__ = SCREAMING_SNAKE_CASE_.pop(old_key )
    SCREAMING_SNAKE_CASE_[new_key] = lowercase__
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ):
    """Split each fused in_proj attention matrix/bias into separate q/k/v
    entries for the 6 encoder and 6 decoder layers (plus decoder
    cross-attention); slices assume a 256-dim hidden size per head group.

    NOTE(review): the def has two identically-named parameters (invalid
    syntax), and an automated rewrite collapsed every assignment target to
    `lowercase__` while reads use the original names (`state_dict`,
    `prefix`, `in_proj_weight`, ...) — identifiers need restoring.
    """
    lowercase__ = ""
    if is_panoptic:
        lowercase__ = "detr."
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        lowercase__ = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
        lowercase__ = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        lowercase__ = in_proj_weight[:256, :]
        lowercase__ = in_proj_bias[:256]
        lowercase__ = in_proj_weight[256:512, :]
        lowercase__ = in_proj_bias[256:512]
        lowercase__ = in_proj_weight[-256:, :]
        lowercase__ = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6 ):
        # read in weights + bias of input projection layer of self-attention
        lowercase__ = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
        lowercase__ = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        lowercase__ = in_proj_weight[:256, :]
        lowercase__ = in_proj_bias[:256]
        lowercase__ = in_proj_weight[256:512, :]
        lowercase__ = in_proj_bias[256:512]
        lowercase__ = in_proj_weight[-256:, :]
        lowercase__ = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        lowercase__ = state_dict.pop(
            f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
        lowercase__ = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        lowercase__ = in_proj_weight_cross_attn[:256, :]
        lowercase__ = in_proj_bias_cross_attn[:256]
        lowercase__ = in_proj_weight_cross_attn[256:512, :]
        lowercase__ = in_proj_bias_cross_attn[256:512]
        lowercase__ = in_proj_weight_cross_attn[-256:, :]
        lowercase__ = in_proj_bias_cross_attn[-256:]
def __lowerCAmelCase ( ):
    """Fetch the standard COCO validation test image as a PIL image.

    NOTE(review): the URL is assigned to `lowercase__` but the request reads
    `SCREAMING_SNAKE_CASE_`, and the return reads `im` — mangled
    identifiers that need restoring. Performs network I/O.
    """
    lowercase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
    lowercase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
    return im
@torch.no_grad()
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False ):
    """Convert an original torch-hub DETR checkpoint to the HF format,
    verify outputs on a test image, then optionally save and/or push it.

    NOTE(review): the def has three identically-named parameters (invalid
    syntax) and an automated rewrite collapsed assignment targets to
    `lowercase__` while reads keep the original names (`model_name`,
    `state_dict`, `model`, `processor`, ...) — identifiers need restoring
    before this routine is runnable.
    """
    lowercase__ , lowercase__ = get_detr_config(SCREAMING_SNAKE_CASE_ )
    # load original model from torch hub
    lowercase__ = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f'''Converting model {model_name}...''' )
    lowercase__ = torch.hub.load("facebookresearch/detr" , model_name_to_original_name[model_name] , pretrained=SCREAMING_SNAKE_CASE_ ).eval()
    lowercase__ = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(SCREAMING_SNAKE_CASE_ ):
        if is_panoptic:
            lowercase__ = "detr." + src
        rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    # query, key and value matrices need special treatment
    read_in_q_k_v(SCREAMING_SNAKE_CASE_ , is_panoptic=SCREAMING_SNAKE_CASE_ )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    lowercase__ = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr" )
                and not key.startswith("class_labels_classifier" )
                and not key.startswith("bbox_predictor" )
            ):
                lowercase__ = state_dict.pop(SCREAMING_SNAKE_CASE_ )
                lowercase__ = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                lowercase__ = state_dict.pop(SCREAMING_SNAKE_CASE_ )
                lowercase__ = val
            elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
                continue
            else:
                lowercase__ = state_dict.pop(SCREAMING_SNAKE_CASE_ )
                lowercase__ = val
        else:
            if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
                lowercase__ = state_dict.pop(SCREAMING_SNAKE_CASE_ )
                lowercase__ = val
    # finally, create HuggingFace model and load state dict
    lowercase__ = DetrForSegmentation(SCREAMING_SNAKE_CASE_ ) if is_panoptic else DetrForObjectDetection(SCREAMING_SNAKE_CASE_ )
    model.load_state_dict(SCREAMING_SNAKE_CASE_ )
    model.eval()
    # verify our conversion on an image
    lowercase__ = "coco_panoptic" if is_panoptic else "coco_detection"
    lowercase__ = DetrImageProcessor(format=SCREAMING_SNAKE_CASE_ )
    lowercase__ = processor(images=prepare_img() , return_tensors="pt" )
    lowercase__ = encoding["pixel_values"]
    lowercase__ = detr(SCREAMING_SNAKE_CASE_ )
    lowercase__ = model(SCREAMING_SNAKE_CASE_ )
    assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1e-3 )
    assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1e-3 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1e-4 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
        Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
        model.save_pretrained(SCREAMING_SNAKE_CASE_ )
        processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub..." )
        model.push_to_hub(f'''nielsr/{model_name}''' )
        processor.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
    # CLI entry point for the DETR conversion script.
    # NOTE(review): the parser/args are assigned to `lowercase_` but read as
    # `parser`/`args`, and `convert_detr_checkpoint` is defined above as
    # `__lowerCAmelCase` — mangled identifiers need restoring.
    lowercase_ = argparse.ArgumentParser()
    parser.add_argument(
        """--model_name""",
        default="""detr-resnet-50""",
        type=str,
        choices=["""detr-resnet-50""", """detr-resnet-101"""],
        help="""Name of the DETR model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
    )
    parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the model to the hub or not.""")
    lowercase_ = parser.parse_args()
    convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 700
|
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    """Sort a list of comparable items in place using exchange sort; the
    (mutated) list is also returned.

    Fixes: the original iterated `range(<the list>)` (TypeError) and read
    the undefined name `numbers` instead of the parameter.

    :param SCREAMING_SNAKE_CASE_: list to sort (mutated in place)
    :return: the same list, sorted ascending
    """
    numbers = SCREAMING_SNAKE_CASE_
    numbers_length = len(numbers )
    for i in range(numbers_length ):
        for j in range(i + 1 , numbers_length ):
            # Swap whenever a later element is smaller than the pivot slot.
            if numbers[j] < numbers[i]:
                numbers[j], numbers[i] = numbers[i], numbers[j]
    return numbers
if __name__ == "__main__":
    lowercase_ = input("""Enter numbers separated by a comma:\n""").strip()
    # NOTE(review): the reads below use `user_input`/`unsorted`/`exchange_sort`
    # while the assignments/definition above were renamed (`lowercase_`,
    # `__lowerCAmelCase`) by an automated rewrite — identifiers need restoring.
    lowercase_ = [int(item) for item in user_input.split(""",""")]
    print(exchange_sort(unsorted))
| 37
| 0
|
from statistics import mean, stdev
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , ndigits = 3 ):
    """Min-max normalize a sequence of numbers into [0, 1].

    Each value x is mapped to (x - min) / (max - min), rounded to
    `ndigits` decimal places.

    NOTE(review): the original def used the same name for both parameters
    (invalid syntax) and read undefined `x_min`/`x_max`; the second
    parameter was renamed to make the function well-formed (positional
    call compatibility is preserved).

    :param SCREAMING_SNAKE_CASE_: non-empty sequence with distinct min/max
    :param ndigits: rounding precision (default 3)
    :return: list of normalized, rounded floats
    """
    data = SCREAMING_SNAKE_CASE_
    x_min = min(data )
    x_max = max(data )
    # normalize data
    return [round((x - x_min) / (x_max - x_min) , ndigits ) for x in data]
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , ndigits = 3 ):
    """Standardize a sequence of numbers to zero mean and unit variance.

    Each value x is mapped to (x - mean) / stdev (sample standard
    deviation), rounded to `ndigits` decimal places.

    NOTE(review): the original def used the same name for both parameters
    (invalid syntax) and read undefined `mu`/`sigma`; the second parameter
    was renamed to make the function well-formed (positional call
    compatibility is preserved).

    :param SCREAMING_SNAKE_CASE_: sequence with at least two values and
        nonzero spread
    :param ndigits: rounding precision (default 3)
    :return: list of standardized, rounded floats
    """
    data = SCREAMING_SNAKE_CASE_
    mu = mean(data )
    sigma = stdev(data )
    # standardize data
    return [round((x - mu) / (sigma) , ndigits ) for x in data]
| 701
|
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    """Return the Catalan numbers C(0)..C(limit) via dynamic programming.

    Uses the recurrence C(i) = sum(C(j) * C(i-j-1) for j in 0..i-1) with
    base cases C(0) = C(1) = 1.

    Fixes: the body read the undefined names `upper_limit`/`catalan_list`
    instead of the parameter and the local list.

    :param SCREAMING_SNAKE_CASE_: highest index to compute (>= 0)
    :return: list [C(0), ..., C(limit)]
    :raises ValueError: when the limit is negative
    """
    upper_limit = SCREAMING_SNAKE_CASE_
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0" )
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
if __name__ == "__main__":
    print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
    print("""\n*** Enter -1 at any time to quit ***""")
    print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
    # NOTE(review): the parsed input is assigned to `lowercase_` but read as
    # `N`, and `catalan_numbers` is defined above as `__lowerCAmelCase` —
    # mangled identifiers need restoring for this loop to run.
    try:
        while True:
            lowercase_ = int(input().strip())
            if N < 0:
                print("""\n********* Goodbye!! ************""")
                break
            else:
                print(F'The Catalan numbers from 0 through {N} are:')
                print(catalan_numbers(N))
                print("""Try another upper limit for the sequence: """, end="""""")
    except (NameError, ValueError):
        print("""\n********* Invalid input, goodbye! ************\n""")
    # Also run any embedded doctests when executed directly.
    import doctest
    doctest.testmod()
| 37
| 0
|
from math import sqrt
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    """Return the sum of the proper divisors of n (divisors excluding n).

    Scans divisor candidates up to sqrt(n), adding each divisor pair
    (i, n // i) once and counting an exact square root only once.

    Fixes: the body read the undefined name `n` instead of the parameter.

    :param SCREAMING_SNAKE_CASE_: positive integer
    :return: sum of all divisors of n strictly below n
    """
    n = SCREAMING_SNAKE_CASE_
    total = 0
    for i in range(1 , int(sqrt(n ) + 1 ) ):
        if n % i == 0 and i != sqrt(n ):
            total += i + n // i
        elif i == sqrt(n ):
            # perfect-square root: add it only once
            total += i
    # `total` counted n itself (via the pair 1 * n); remove it.
    return total - n
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ = 1_0000 ):
    """Sum the amicable numbers below the limit (Project Euler 21 shape).

    NOTE(review): the body calls `sum_of_divisors` (the helper above is now
    named `__lowerCAmelCase`), returns `total` while assigning
    `lowercase__`, and appears to pass the limit instead of `i` inside the
    filter — mangled identifiers need restoring before this runs.
    """
    lowercase__ = sum(
        i
        for i in range(1 , SCREAMING_SNAKE_CASE_ )
        if sum_of_divisors(sum_of_divisors(SCREAMING_SNAKE_CASE_ ) ) == i and sum_of_divisors(SCREAMING_SNAKE_CASE_ ) != i )
    return total
if __name__ == "__main__":
    # Read a limit from stdin and print the amicable-number sum below it.
    # NOTE(review): `solution` is not defined under that name in this file.
    print(solution(int(str(input()).strip())))
| 702
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 37
| 0
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    """Build an UperNetConfig with a ConvNeXt backbone sized from the model
    name (tiny/small/base/large/xlarge) plus ADE20k label mappings.

    NOTE(review): an automated rewrite collapsed every assignment target to
    `lowercase__` while reads keep the original names (`model_name`,
    `idalabel`, `config`), and several casts/kwargs receive the parameter
    instead of the intended locals — identifiers need restoring before this
    function is runnable.
    """
    lowercase__ = 384
    if "tiny" in model_name:
        lowercase__ = [3, 3, 9, 3]
        lowercase__ = [96, 192, 384, 768]
    if "small" in model_name:
        lowercase__ = [3, 3, 27, 3]
        lowercase__ = [96, 192, 384, 768]
    if "base" in model_name:
        lowercase__ = [3, 3, 27, 3]
        lowercase__ = [128, 256, 512, 1024]
        lowercase__ = 512
    if "large" in model_name:
        lowercase__ = [3, 3, 27, 3]
        lowercase__ = [192, 384, 768, 1536]
        lowercase__ = 768
    if "xlarge" in model_name:
        lowercase__ = [3, 3, 27, 3]
        lowercase__ = [256, 512, 1024, 2048]
        lowercase__ = 1024
    # set label information
    lowercase__ = 150
    lowercase__ = "huggingface/label-files"
    lowercase__ = "ade20k-id2label.json"
    lowercase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) )
    lowercase__ = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
    lowercase__ = {v: k for k, v in idalabel.items()}
    lowercase__ = ConvNextConfig(
        depths=SCREAMING_SNAKE_CASE_ , hidden_sizes=SCREAMING_SNAKE_CASE_ , out_features=["stage1", "stage2", "stage3", "stage4"] )
    lowercase__ = UperNetConfig(
        backbone_config=SCREAMING_SNAKE_CASE_ , auxiliary_in_channels=SCREAMING_SNAKE_CASE_ , num_labels=SCREAMING_SNAKE_CASE_ , idalabel=SCREAMING_SNAKE_CASE_ , labelaid=SCREAMING_SNAKE_CASE_ , )
    return config
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    """Build the (old_key, new_key) rename table mapping mmsegmentation
    UperNet-ConvNeXt checkpoint names to HF Transformers names: stem,
    ConvNeXt stages/downsampling layers, per-stage norms, and the decode /
    auxiliary head classifiers.

    NOTE(review): the list's assignment target was renamed to `lowercase__`
    while the appends read `rename_keys`, and the loops read `config`
    instead of the parameter — identifiers need restoring before this
    function is runnable.
    """
    lowercase__ = []
    # fmt: off
    # stem
    rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
    rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
    rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
    rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
    # stages
    for i in range(len(config.backbone_config.depths ) ):
        for j in range(config.backbone_config.depths[i] ):
            rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
            rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
            rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
            rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
            rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
            rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
            rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
            rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
            rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
        if i > 0:
            rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
            rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
            rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
            rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
        rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
        rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ] )
    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    """Rename key `old` of `dct` in place, re-inserting its value under `new`.

    The def previously had three identical parameter names (a SyntaxError) and a
    name that did not match its call site (`rename_key` at the state-dict loop).
    """
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Convert an mmsegmentation UperNet+ConvNeXt checkpoint to HF format.

    Downloads the original checkpoint, remaps its state-dict keys to the
    `UperNetForSemanticSegmentation` naming scheme, verifies the logits on a
    sample ADE20k image against known-good slices, then optionally saves the
    model/processor locally and/or pushes them to the hub.

    Fixes: the def had three identical parameter names (SyntaxError), mangled
    local names, and a name that did not match its call site.
    """
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)

    # Known-good logits slices for each released checkpoint.
    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]])
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]])
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]])
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]])
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]])
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    # Fix: the parser/args objects were assigned to `lowercase_` while all
    # subsequent code referenced `parser` and `args` (NameError at runtime).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-convnext-tiny",
        type=str,
        choices=[f"upernet-convnext-{size}" for size in ["tiny", "small", "base", "large", "xlarge"]],
        help="Name of the ConvNext UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 703
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model, ckpt_dir, model_name):
    """Export a PyTorch BERT model's weights as a TensorFlow 1.x checkpoint.

    Args:
        model: BertModel instance whose state dict will be exported.
        ckpt_dir: directory in which to write the TF checkpoint (created if missing).
        model_name: model name; used for the checkpoint file name.

    Fixes: the def had three identical parameter names (SyntaxError), mangled
    local names, and a name that did not match its keyword call site.
    """
    # Weights stored transposed on the PyTorch side relative to TF.
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    # (pytorch substring, tf substring) rewrite rules, applied in order.
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name):
        # Map a PyTorch parameter name to the canonical TF BERT variable name.
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor, name, session):
        # Create (and initialize) a TF variable matching `tensor`'s dtype/shape.
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
def main(raw_args=None):
    """CLI entry point: load a PyTorch BERT checkpoint and export it to TF1.

    `raw_args` lets tests pass an argv list instead of reading sys.argv.
    Fixes: def name did not match the `main()` call in the __main__ guard, and
    local names were mangled.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model")
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir, )
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
| 37
| 0
|
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
# Stable Diffusion inference on CPU with Intel Extension for PyTorch (ipex).
# Fixes: every assignment target had been mangled to `lowercase_` while the
# code referenced the real names (NameError); `torch.bfloataa` corrected to
# `torch.bfloat16`; `--steps` now feeds `num_inference_steps`.
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
# Sample input shapes match the SD UNet: latents (2,4,64,64), a timestep, and
# CLIP text embeddings (2,77,768) — assumed SD v1.x dims; TODO confirm for v2.
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    # Older ipex versions do not accept `sample_input` — best-effort fallback.
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
| 704
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Standard HF lazy-module init. Fixes: `_import_structure` and the
# `sys.modules[__name__]` assignment had been mangled to `lowercase_`, so the
# modeling symbols were never registered and the lazy module never installed.
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only importable when torch is installed.
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 37
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def get_config(model_name):
    """Build a `BitConfig` for an ImageNet-1k classifier from a timm model name.

    Fixes: the id2label/label2id locals and BitConfig kwargs were mangled
    (`idalabel`/`labelaid`), and the def name did not match its call site.
    """
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    # BiT checkpoints use weight-standardized convs; plain resnetv2 does not.
    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer, num_labels=1000, id2label=id2label, label2id=label2id, )

    return config
def rename_key(name):
    """Map a timm BiT parameter name to its HF `BitForImageClassification` name.

    Fix: the body referenced `name` but the parameter had been renamed to
    `SCREAMING_SNAKE_CASE_`, making every call a NameError.
    """
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    # Everything that is neither the classifier head nor already prefixed
    # lives under the encoder.
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
def prepare_img():
    """Download the standard COCO cats test image used for logits verification.

    Fix: the url/stream arguments were mangled to an undefined name.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy a timm BiT checkpoint's weights into `BitForImageClassification`.

    Verifies preprocessing and logits against the original timm model, then
    optionally saves and/or pushes the converted model + processor.

    Fixes: duplicate parameter names (SyntaxError), mangled locals, the
    state-dict re-insert dropping the `rename_key(key)` mapping, and
    `idalabel` -> `id2label` on the config lookup.
    """
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        # Head tensors carry a trailing 1x1 spatial dim in timm — squeeze it.
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor that mirrors the timm eval transform
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True, size={"shortest_edge": timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=True, crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]}, do_normalize=True, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    # Fix: parser/args were assigned to `lowercase_` while the code referenced
    # `parser` and `args` (NameError at runtime).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )

    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 705
|
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()

# Registry mapping a CLI model type to its (config class, TF class(es),
# PyTorch class(es), pretrained archive map(s)).
# Fix: the dict was assigned to `lowercase_` while every consumer below
# references `MODEL_CLASSES` (NameError at runtime).
MODEL_CLASSES = {
    "bart": (
        BartConfig,
        TFBartForConditionalGeneration,
        TFBartForSequenceClassification,
        BartForConditionalGeneration,
        BART_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "bert": (
        BertConfig,
        TFBertForPreTraining,
        BertForPreTraining,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-large-uncased-whole-word-masking-finetuned-squad": (
        BertConfig,
        TFBertForQuestionAnswering,
        BertForQuestionAnswering,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-large-cased-whole-word-masking-finetuned-squad": (
        BertConfig,
        TFBertForQuestionAnswering,
        BertForQuestionAnswering,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-base-cased-finetuned-mrpc": (
        BertConfig,
        TFBertForSequenceClassification,
        BertForSequenceClassification,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "dpr": (
        DPRConfig,
        TFDPRQuestionEncoder,
        TFDPRContextEncoder,
        TFDPRReader,
        DPRQuestionEncoder,
        DPRContextEncoder,
        DPRReader,
        DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "gpt2": (
        GPTaConfig,
        TFGPTaLMHeadModel,
        GPTaLMHeadModel,
        GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlnet": (
        XLNetConfig,
        TFXLNetLMHeadModel,
        XLNetLMHeadModel,
        XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlm": (
        XLMConfig,
        TFXLMWithLMHeadModel,
        XLMWithLMHeadModel,
        XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlm-roberta": (
        XLMRobertaConfig,
        TFXLMRobertaForMaskedLM,
        XLMRobertaForMaskedLM,
        XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "transfo-xl": (
        TransfoXLConfig,
        TFTransfoXLLMHeadModel,
        TransfoXLLMHeadModel,
        TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "openai-gpt": (
        OpenAIGPTConfig,
        TFOpenAIGPTLMHeadModel,
        OpenAIGPTLMHeadModel,
        OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "roberta": (
        RobertaConfig,
        TFRobertaForCausalLM,
        TFRobertaForMaskedLM,
        RobertaForMaskedLM,
        ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "layoutlm": (
        LayoutLMConfig,
        TFLayoutLMForMaskedLM,
        LayoutLMForMaskedLM,
        LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "roberta-large-mnli": (
        RobertaConfig,
        TFRobertaForSequenceClassification,
        RobertaForSequenceClassification,
        ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "camembert": (
        CamembertConfig,
        TFCamembertForMaskedLM,
        CamembertForMaskedLM,
        CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "flaubert": (
        FlaubertConfig,
        TFFlaubertWithLMHeadModel,
        FlaubertWithLMHeadModel,
        FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "distilbert": (
        DistilBertConfig,
        TFDistilBertForMaskedLM,
        DistilBertForMaskedLM,
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "distilbert-base-distilled-squad": (
        DistilBertConfig,
        TFDistilBertForQuestionAnswering,
        DistilBertForQuestionAnswering,
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "lxmert": (
        LxmertConfig,
        TFLxmertForPreTraining,
        LxmertForPreTraining,
        LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "lxmert-visual-feature-encoder": (
        LxmertConfig,
        TFLxmertVisualFeatureEncoder,
        LxmertVisualFeatureEncoder,
        LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "ctrl": (
        CTRLConfig,
        TFCTRLLMHeadModel,
        CTRLLMHeadModel,
        CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "albert": (
        AlbertConfig,
        TFAlbertForPreTraining,
        AlbertForPreTraining,
        ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "t5": (
        TaConfig,
        TFTaForConditionalGeneration,
        TaForConditionalGeneration,
        T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "electra": (
        ElectraConfig,
        TFElectraForPreTraining,
        ElectraForPreTraining,
        ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "wav2vec2": (
        WavaVecaConfig,
        TFWavaVecaModel,
        WavaVecaModel,
        WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    """Convert one PyTorch checkpoint to a TF2 `.h5` weights file.

    Optionally rebuilds the PyTorch model and asserts the TF/PT outputs agree
    to within 2e-2. Fixes: duplicate parameter names (SyntaxError) and mangled
    local names throughout.
    """
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models)

    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict)

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    """Batch-convert PyTorch checkpoints to TF2 for one or all model types.

    Fixes: duplicate parameter names (SyntaxError) and mangled locals; variable
    names restored from the keyword call site in the __main__ guard.
    """
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1):
            print("-" * 100)
            # Finetuned checkpoints are named "<type>-squad"/"-mrpc"/"-mnli";
            # they are only converted when explicitly requested.
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}")
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type, pytorch_checkpoint_path=model_file, config_file=config_file, tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"), compare_with_pt_model=compare_with_pt_model, )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    # Fix: parser/args were assigned to `lowercase_` while the code referenced
    # `parser` and `args` (NameError at runtime).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        help=(
            f"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and "
            "convert all the models from AWS."
        ),
    )
    parser.add_argument(
        "--pytorch_checkpoint_path",
        default=None,
        type=str,
        help=(
            "Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
            "If not given, will download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        help=(
            "The config json file corresponding to the pre-trained model. \n"
            "This specifies the model architecture. If not given and "
            "--pytorch_checkpoint_path is not given or is a shortcut name "
            "use the configuration associated to the shortcut name on the AWS"
        ),
    )
    parser.add_argument(
        "--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
    )
    parser.add_argument(
        "--use_cached_models",
        action="store_true",
        help="Use cached models if possible instead of updating to latest checkpoint versions.",
    )
    parser.add_argument(
        "--remove_cached_files",
        action="store_true",
        help="Remove pytorch models after conversion (save memory when converting in batches).",
    )
    parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
    args = parser.parse_args()

    # if args.pytorch_checkpoint_path is not None:
    #     convert_pt_checkpoint_to_tf(args.model_type.lower(),
    #                                 args.pytorch_checkpoint_path,
    #                                 args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
    #                                 args.tf_dump_path,
    #                                 compare_with_pt_model=args.compare_with_pt_model,
    #                                 use_cached_models=args.use_cached_models)
    # else:
    convert_all_pt_checkpoints_to_tf(
        args.model_type.lower() if args.model_type is not None else None,
        args.tf_dump_path,
        model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
        if args.pytorch_checkpoint_path is not None
        else None,
        config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
        compare_with_pt_model=args.compare_with_pt_model,
        use_cached_models=args.use_cached_models,
        remove_cached_files=args.remove_cached_files,
        only_convert_finetuned_models=args.only_convert_finetuned_models,
    )
| 37
| 0
|
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
lowercase_ = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches=10, n_valid_batches=2):
    """Build (train, valid) DataLoaders over a noisy y = a*x + b dataset.

    Fixes: all five parameters shared one name (SyntaxError) and the body
    referenced names the mangled signature never bound.
    """

    def get_dataset(n_batches):
        # x ~ N(0,1); targets follow the line with small Gaussian noise.
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Run a quick MSE training loop; return one random float per epoch.

    The returned randoms are used by the checkpointing tests to detect RNG
    state drift. Fixes: duplicate parameter names (SyntaxError) and mangled
    locals; signature restored from the positional call site.
    """
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    """Minimal y = a*x + b model for checkpointing tests.

    Fixes: the class was named `_snake_case` while the tests instantiate
    `DummyModel()`, and the forward pass was named `A__` so `model(x)` would
    fail — renamed to `forward` as `nn.Module.__call__` requires.
    """

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class _snake_case ( unittest.TestCase):
def A__ ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase__ = DummyModel()
lowercase__ = torch.optim.Adam(params=model.parameters(), lr=1e-3 )
lowercase__ , lowercase__ = dummy_dataloaders()
lowercase__ = ProjectConfiguration(total_limit=1, project_dir=__lowercase, automatic_checkpoint_naming=__lowercase )
# Train baseline
lowercase__ = Accelerator(project_config=__lowercase )
lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare(
__lowercase, __lowercase, __lowercase, __lowercase )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ), 1 )
def A__ ( self : Optional[Any] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase__ = DummyModel()
lowercase__ = torch.optim.Adam(params=model.parameters(), lr=1e-3 )
lowercase__ , lowercase__ = dummy_dataloaders()
# Train baseline
lowercase__ = Accelerator()
lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare(
__lowercase, __lowercase, __lowercase, __lowercase )
# Save initial
lowercase__ = os.path.join(__lowercase, "initial" )
accelerator.save_state(__lowercase )
((lowercase__) , (lowercase__)) = model.a.item(), model.b.item()
lowercase__ = optimizer.state_dict()
lowercase__ = train(3, __lowercase, __lowercase, __lowercase, __lowercase )
((lowercase__) , (lowercase__)) = model.a.item(), model.b.item()
lowercase__ = optimizer.state_dict()
# Train partially
set_seed(42 )
lowercase__ = DummyModel()
lowercase__ = torch.optim.Adam(params=model.parameters(), lr=1e-3 )
lowercase__ , lowercase__ = dummy_dataloaders()
lowercase__ = Accelerator()
lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare(
__lowercase, __lowercase, __lowercase, __lowercase )
accelerator.load_state(__lowercase )
((lowercase__) , (lowercase__)) = model.a.item(), model.b.item()
lowercase__ = optimizer.state_dict()
self.assertEqual(__lowercase, __lowercase )
self.assertEqual(__lowercase, __lowercase )
self.assertEqual(__lowercase, __lowercase )
lowercase__ = train(2, __lowercase, __lowercase, __lowercase, __lowercase )
# Save everything
lowercase__ = os.path.join(__lowercase, "checkpoint" )
accelerator.save_state(__lowercase )
# Load everything back in and make sure all states work
accelerator.load_state(__lowercase )
test_rands += train(1, __lowercase, __lowercase, __lowercase, __lowercase )
((lowercase__) , (lowercase__)) = model.a.item(), model.b.item()
lowercase__ = optimizer.state_dict()
self.assertEqual(__lowercase, __lowercase )
self.assertEqual(__lowercase, __lowercase )
self.assertEqual(__lowercase, __lowercase )
self.assertEqual(__lowercase, __lowercase )
def A__ ( self : List[Any] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase__ = DummyModel()
lowercase__ = torch.optim.Adam(params=model.parameters(), lr=1e-3 )
lowercase__ , lowercase__ = dummy_dataloaders()
lowercase__ = ProjectConfiguration(automatic_checkpoint_naming=__lowercase )
# Train baseline
lowercase__ = Accelerator(project_dir=__lowercase, project_config=__lowercase )
lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare(
__lowercase, __lowercase, __lowercase, __lowercase )
# Save initial
accelerator.save_state()
((lowercase__) , (lowercase__)) = model.a.item(), model.b.item()
lowercase__ = optimizer.state_dict()
lowercase__ = train(3, __lowercase, __lowercase, __lowercase, __lowercase )
((lowercase__) , (lowercase__)) = model.a.item(), model.b.item()
lowercase__ = optimizer.state_dict()
# Train partially
set_seed(42 )
lowercase__ = DummyModel()
lowercase__ = torch.optim.Adam(params=model.parameters(), lr=1e-3 )
lowercase__ , lowercase__ = dummy_dataloaders()
lowercase__ = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=__lowercase )
lowercase__ = Accelerator(project_dir=__lowercase, project_config=__lowercase )
lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare(
__lowercase, __lowercase, __lowercase, __lowercase )
accelerator.load_state(os.path.join(__lowercase, "checkpoints", "checkpoint_0" ) )
((lowercase__) , (lowercase__)) = model.a.item(), model.b.item()
lowercase__ = optimizer.state_dict()
self.assertEqual(__lowercase, __lowercase )
self.assertEqual(__lowercase, __lowercase )
self.assertEqual(__lowercase, __lowercase )
lowercase__ = train(2, __lowercase, __lowercase, __lowercase, __lowercase )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__lowercase, "checkpoints", "checkpoint_1" ) )
test_rands += train(1, __lowercase, __lowercase, __lowercase, __lowercase )
((lowercase__) , (lowercase__)) = model.a.item(), model.b.item()
lowercase__ = optimizer.state_dict()
self.assertEqual(__lowercase, __lowercase )
self.assertEqual(__lowercase, __lowercase )
self.assertEqual(__lowercase, __lowercase )
self.assertEqual(__lowercase, __lowercase )
def A__ ( self : List[str] ):
lowercase__ = torch.tensor([1, 2, 3] )
lowercase__ = torch.tensor([2, 3, 4] )
lowercase__ = DummyModel()
lowercase__ = torch.optim.Adam(net.parameters() )
lowercase__ = Accelerator()
with self.assertRaises(__lowercase ) as ve:
accelerator.register_for_checkpointing(__lowercase, __lowercase, __lowercase, __lowercase )
lowercase__ = str(ve.exception )
self.assertTrue("Item at index 0" in message )
self.assertTrue("Item at index 1" in message )
self.assertFalse("Item at index 2" in message )
self.assertFalse("Item at index 3" in message )
def A__ ( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase__ = DummyModel()
lowercase__ = torch.optim.Adam(params=model.parameters(), lr=1e-3 )
lowercase__ = torch.optim.lr_scheduler.StepLR(__lowercase, step_size=1, gamma=0.99 )
lowercase__ , lowercase__ = dummy_dataloaders()
lowercase__ = ProjectConfiguration(automatic_checkpoint_naming=__lowercase )
# Train baseline
lowercase__ = Accelerator(project_dir=__lowercase, project_config=__lowercase )
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare(
__lowercase, __lowercase, __lowercase, __lowercase, __lowercase )
# Save initial
accelerator.save_state()
lowercase__ = scheduler.state_dict()
train(3, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase )
self.assertNotEqual(__lowercase, scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__lowercase, "checkpoints", "checkpoint_0" ) )
self.assertEqual(__lowercase, scheduler.state_dict() )
def A__ ( self : Dict ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase__ = DummyModel()
lowercase__ = ProjectConfiguration(automatic_checkpoint_naming=__lowercase, total_limit=2 )
# Train baseline
lowercase__ = Accelerator(project_dir=__lowercase, project_config=__lowercase )
lowercase__ = accelerator.prepare(__lowercase )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(__lowercase, "checkpoints", "checkpoint_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowercase, "checkpoints", "checkpoint_9" ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowercase, "checkpoints", "checkpoint_10" ) ) )
@require_cuda
def A__ ( self : Tuple ):
lowercase__ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(__lowercase, env=os.environ.copy() )
if __name__ == "__main__":
lowercase_ = """/tmp/accelerate/state_checkpointing"""
lowercase_ = DummyModel()
lowercase_ = torch.optim.Adam(params=model.parameters(), lr=1e-3)
lowercase_ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
lowercase_ , lowercase_ = dummy_dataloaders()
lowercase_ = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
lowercase_ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="""no""")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
lowercase_ , lowercase_ = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
lowercase_ = group["""params"""][0].device
break
assert param_device.type == accelerator.device.type
lowercase_ = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""")
for group in optimizer.param_groups:
lowercase_ = group["""params"""][0].device
break
assert (
param_device.type == torch.device("""cpu""").type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""")
for group in optimizer.param_groups:
lowercase_ = group["""params"""][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="""Unsupported optimizer map location passed"""):
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 706
|
import math
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if 0 not in (x, y):
# We use the relation x^y = y*log10(x), where 10 is the base.
return y * math.logaa(SCREAMING_SNAKE_CASE_ )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError("This should never happen" )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
lowercase_ = """Enter the base and the power separated by a comma: """
lowercase_ , lowercase_ = map(int, input(prompt).split(""","""))
lowercase_ , lowercase_ = map(int, input(prompt).split(""","""))
# We find the log of each number, using the function res(), which takes two
# arguments.
lowercase_ = res(xa, ya)
lowercase_ = res(xa, ya)
# We check for the largest number
if resa > resa:
print("""Largest number is""", xa, """^""", ya)
elif resa > resa:
print("""Largest number is""", xa, """^""", ya)
else:
print("""Both are equal""")
| 37
| 0
|
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
if length <= 0 or not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError("Length must be a positive integer." )
return [n * (2 * n - 1) for n in range(SCREAMING_SNAKE_CASE_ )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 707
|
import pickle
import numpy as np
from matplotlib import pyplot as plt
class _snake_case :
def __init__( self : Tuple, __lowercase : Union[str, Any], __lowercase : int, __lowercase : Union[str, Any], __lowercase : str, __lowercase : List[Any], __lowercase : List[str]=0.2, __lowercase : List[str]=0.2 ):
lowercase__ = bp_numa
lowercase__ = bp_numa
lowercase__ = bp_numa
lowercase__ = conva_get[:2]
lowercase__ = conva_get[2]
lowercase__ = size_pa
lowercase__ = rate_w
lowercase__ = rate_t
lowercase__ = [
np.mat(-1 * np.random.rand(self.conva[0], self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
lowercase__ = np.mat(-1 * np.random.rand(self.num_bpa, self.num_bpa ) + 0.5 )
lowercase__ = np.mat(-1 * np.random.rand(self.num_bpa, self.num_bpa ) + 0.5 )
lowercase__ = -2 * np.random.rand(self.conva[1] ) + 1
lowercase__ = -2 * np.random.rand(self.num_bpa ) + 1
lowercase__ = -2 * np.random.rand(self.num_bpa ) + 1
def A__ ( self : Any, __lowercase : List[str] ):
# save model dict with pickle
lowercase__ = {
"num_bp1": self.num_bpa,
"num_bp2": self.num_bpa,
"num_bp3": self.num_bpa,
"conv1": self.conva,
"step_conv1": self.step_conva,
"size_pooling1": self.size_poolinga,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conva,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conva,
"thre_bp2": self.thre_bpa,
"thre_bp3": self.thre_bpa,
}
with open(__lowercase, "wb" ) as f:
pickle.dump(__lowercase, __lowercase )
print(F'''Model saved: {save_path}''' )
@classmethod
def A__ ( cls : Dict, __lowercase : Union[str, Any] ):
# read saved model
with open(__lowercase, "rb" ) as f:
lowercase__ = pickle.load(__lowercase ) # noqa: S301
lowercase__ = model_dic.get("conv1" )
conv_get.append(model_dic.get("step_conv1" ) )
lowercase__ = model_dic.get("size_pooling1" )
lowercase__ = model_dic.get("num_bp1" )
lowercase__ = model_dic.get("num_bp2" )
lowercase__ = model_dic.get("num_bp3" )
lowercase__ = model_dic.get("rate_weight" )
lowercase__ = model_dic.get("rate_thre" )
# create model instance
lowercase__ = CNN(__lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase )
# modify model parameter
lowercase__ = model_dic.get("w_conv1" )
lowercase__ = model_dic.get("wkj" )
lowercase__ = model_dic.get("vji" )
lowercase__ = model_dic.get("thre_conv1" )
lowercase__ = model_dic.get("thre_bp2" )
lowercase__ = model_dic.get("thre_bp3" )
return conv_ins
def A__ ( self : str, __lowercase : List[Any] ):
return 1 / (1 + np.exp(-1 * x ))
def A__ ( self : List[str], __lowercase : Optional[Any] ):
return round(__lowercase, 3 )
def A__ ( self : Optional[Any], __lowercase : Dict, __lowercase : Optional[int], __lowercase : Optional[int], __lowercase : Optional[Any], __lowercase : str ):
# convolution process
lowercase__ = convs[0]
lowercase__ = convs[1]
lowercase__ = np.shape(__lowercase )[0]
# get the data slice of original image data, data_focus
lowercase__ = []
for i_focus in range(0, size_data - size_conv + 1, __lowercase ):
for j_focus in range(0, size_data - size_conv + 1, __lowercase ):
lowercase__ = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__lowercase )
# calculate the feature map of every single kernel, and saved as list of matrix
lowercase__ = []
lowercase__ = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(__lowercase ):
lowercase__ = []
for i_focus in range(len(__lowercase ) ):
lowercase__ = (
np.sum(np.multiply(data_focus[i_focus], w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(__lowercase ) )
lowercase__ = np.asmatrix(__lowercase ).reshape(
__lowercase, __lowercase )
data_featuremap.append(__lowercase )
# expanding the data slice to One dimenssion
lowercase__ = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__lowercase ) )
lowercase__ = np.asarray(__lowercase )
return focus_list, data_featuremap
def A__ ( self : List[Any], __lowercase : Any, __lowercase : List[Any], __lowercase : Union[str, Any]="average_pool" ):
# pooling process
lowercase__ = len(featuremaps[0] )
lowercase__ = int(size_map / size_pooling )
lowercase__ = []
for i_map in range(len(__lowercase ) ):
lowercase__ = featuremaps[i_map]
lowercase__ = []
for i_focus in range(0, __lowercase, __lowercase ):
for j_focus in range(0, __lowercase, __lowercase ):
lowercase__ = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__lowercase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__lowercase ) )
lowercase__ = np.asmatrix(__lowercase ).reshape(__lowercase, __lowercase )
featuremap_pooled.append(__lowercase )
return featuremap_pooled
def A__ ( self : str, __lowercase : Optional[Any] ):
# expanding three dimension data to one dimension list
lowercase__ = []
for i in range(len(__lowercase ) ):
lowercase__ = np.shape(data[i] )
lowercase__ = data[i].reshape(1, shapes[0] * shapes[1] )
lowercase__ = data_listed.getA().tolist()[0]
data_expanded.extend(__lowercase )
lowercase__ = np.asarray(__lowercase )
return data_expanded
def A__ ( self : Optional[int], __lowercase : Optional[int] ):
# expanding matrix to one dimension list
lowercase__ = np.asarray(__lowercase )
lowercase__ = np.shape(__lowercase )
lowercase__ = data_mat.reshape(1, shapes[0] * shapes[1] )
return data_expanded
def A__ ( self : str, __lowercase : Tuple, __lowercase : List[Any], __lowercase : Any, __lowercase : Union[str, Any], __lowercase : Tuple ):
lowercase__ = []
lowercase__ = 0
for i_map in range(__lowercase ):
lowercase__ = np.ones((size_map, size_map) )
for i in range(0, __lowercase, __lowercase ):
for j in range(0, __lowercase, __lowercase ):
lowercase__ = pd_pool[
i_pool
]
lowercase__ = i_pool + 1
lowercase__ = np.multiply(
__lowercase, np.multiply(out_map[i_map], (1 - out_map[i_map]) ) )
pd_all.append(__lowercase )
return pd_all
def A__ ( self : Tuple, __lowercase : int, __lowercase : Optional[Any], __lowercase : List[Any], __lowercase : Optional[Any], __lowercase : List[Any], __lowercase : List[str]=bool ):
# model traning
print("----------------------Start Training-------------------------" )
print((" - - Shape: Train_Data ", np.shape(__lowercase )) )
print((" - - Shape: Teach_Data ", np.shape(__lowercase )) )
lowercase__ = 0
lowercase__ = []
lowercase__ = 1_0000
while rp < n_repeat and mse >= error_accuracy:
lowercase__ = 0
print(F'''-------------Learning Time {rp}--------------''' )
for p in range(len(__lowercase ) ):
# print('------------Learning Image: %d--------------'%p)
lowercase__ = np.asmatrix(datas_train[p] )
lowercase__ = np.asarray(datas_teach[p] )
lowercase__ , lowercase__ = self.convolute(
__lowercase, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, )
lowercase__ = self.pooling(__lowercase, self.size_poolinga )
lowercase__ = np.shape(__lowercase )
lowercase__ = self._expand(__lowercase )
lowercase__ = data_bp_input
lowercase__ = np.dot(__lowercase, self.vji.T ) - self.thre_bpa
lowercase__ = self.sig(__lowercase )
lowercase__ = np.dot(__lowercase, self.wkj.T ) - self.thre_bpa
lowercase__ = self.sig(__lowercase )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
lowercase__ = np.multiply(
(data_teach - bp_outa), np.multiply(__lowercase, (1 - bp_outa) ) )
lowercase__ = np.multiply(
np.dot(__lowercase, self.wkj ), np.multiply(__lowercase, (1 - bp_outa) ) )
lowercase__ = np.dot(__lowercase, self.vji )
lowercase__ = pd_i_all / (self.size_poolinga * self.size_poolinga)
lowercase__ = pd_conva_pooled.T.getA().tolist()
lowercase__ = self._calculate_gradient_from_pool(
__lowercase, __lowercase, shape_featuremapa[0], shape_featuremapa[1], self.size_poolinga, )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
lowercase__ = self._expand_mat(pd_conva_all[k_conv] )
lowercase__ = self.rate_weight * np.dot(__lowercase, __lowercase )
lowercase__ = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
lowercase__ = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
lowercase__ = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
lowercase__ = self.vji + pd_j_all.T * bp_outa * self.rate_weight
lowercase__ = self.thre_bpa - pd_k_all * self.rate_thre
lowercase__ = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
lowercase__ = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
lowercase__ = rp + 1
lowercase__ = error_count / patterns
all_mse.append(__lowercase )
def draw_error():
lowercase__ = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(__lowercase, "+-" )
plt.plot(__lowercase, "r--" )
plt.xlabel("Learning Times" )
plt.ylabel("All_mse" )
plt.grid(__lowercase, alpha=0.5 )
plt.show()
print("------------------Training Complished---------------------" )
print((" - - Training epoch: ", rp, F''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def A__ ( self : List[str], __lowercase : Optional[int] ):
# model predict
lowercase__ = []
print("-------------------Start Testing-------------------------" )
print((" - - Shape: Test_Data ", np.shape(__lowercase )) )
for p in range(len(__lowercase ) ):
lowercase__ = np.asmatrix(datas_test[p] )
lowercase__ , lowercase__ = self.convolute(
__lowercase, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, )
lowercase__ = self.pooling(__lowercase, self.size_poolinga )
lowercase__ = self._expand(__lowercase )
lowercase__ = data_bp_input
lowercase__ = bp_outa * self.vji.T - self.thre_bpa
lowercase__ = self.sig(__lowercase )
lowercase__ = bp_outa * self.wkj.T - self.thre_bpa
lowercase__ = self.sig(__lowercase )
produce_out.extend(bp_outa.getA().tolist() )
lowercase__ = [list(map(self.do_round, __lowercase ) ) for each in produce_out]
return np.asarray(__lowercase )
def A__ ( self : int, __lowercase : Any ):
# return the data of image after convoluting process so we can check it out
lowercase__ = np.asmatrix(__lowercase )
lowercase__ , lowercase__ = self.convolute(
__lowercase, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, )
lowercase__ = self.pooling(__lowercase, self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 37
| 0
|
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_="" ):
lowercase__ = tempfile.mkdtemp()
return os.path.join(SCREAMING_SNAKE_CASE_ , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class _snake_case ( unittest.TestCase):
def A__ ( self : List[str] ):
lowercase__ = torch.rand(12, dtype=torch.floataa ) - 0.5
lowercase__ = AgentAudio(__lowercase )
lowercase__ = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(__lowercase, agent_type.to_raw(), atol=1e-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(__lowercase ) )
# Ensure that the file contains the same value as the original tensor
lowercase__ , lowercase__ = sf.read(__lowercase )
self.assertTrue(torch.allclose(__lowercase, torch.tensor(__lowercase ), atol=1e-4 ) )
def A__ ( self : str ):
lowercase__ = torch.rand(12, dtype=torch.floataa ) - 0.5
lowercase__ = get_new_path(suffix=".wav" )
sf.write(__lowercase, __lowercase, 1_6000 )
lowercase__ = AgentAudio(__lowercase )
self.assertTrue(torch.allclose(__lowercase, agent_type.to_raw(), atol=1e-4 ) )
self.assertEqual(agent_type.to_string(), __lowercase )
@require_vision
@require_torch
class _snake_case ( unittest.TestCase):
def A__ ( self : Optional[int] ):
lowercase__ = torch.randint(0, 256, (64, 64, 3) )
lowercase__ = AgentImage(__lowercase )
lowercase__ = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(__lowercase, agent_type._tensor, atol=1e-4 ) )
self.assertIsInstance(agent_type.to_raw(), Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(__lowercase ) )
def A__ ( self : Dict ):
lowercase__ = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png"
lowercase__ = Image.open(__lowercase )
lowercase__ = AgentImage(__lowercase )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(__lowercase ) )
def A__ ( self : Optional[int] ):
lowercase__ = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png"
lowercase__ = Image.open(__lowercase )
lowercase__ = AgentImage(__lowercase )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(__lowercase ) )
class _snake_case ( unittest.TestCase):
def A__ ( self : Union[str, Any] ):
lowercase__ = "Hey!"
lowercase__ = AgentText(__lowercase )
self.assertEqual(__lowercase, agent_type.to_string() )
self.assertEqual(__lowercase, agent_type.to_raw() )
self.assertEqual(__lowercase, __lowercase )
| 708
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
lowercase__ = "huggingface/label-files"
lowercase__ = "imagenet-1k-id2label.json"
lowercase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) )
lowercase__ = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
lowercase__ = {v: k for k, v in idalabel.items()}
lowercase__ = "std_conv" if "bit" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
lowercase__ = BitConfig(
conv_layer=SCREAMING_SNAKE_CASE_ , num_labels=1000 , idalabel=SCREAMING_SNAKE_CASE_ , labelaid=SCREAMING_SNAKE_CASE_ , )
return config
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
if "stem.conv" in name:
lowercase__ = name.replace("stem.conv" , "bit.embedder.convolution" )
if "blocks" in name:
lowercase__ = name.replace("blocks" , "layers" )
if "head.fc" in name:
lowercase__ = name.replace("head.fc" , "classifier.1" )
if name.startswith("norm" ):
lowercase__ = "bit." + name
if "bit" not in name and "classifier" not in name:
lowercase__ = "bit.encoder." + name
return name
def __lowerCAmelCase ( ):
lowercase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ):
lowercase__ = get_config(SCREAMING_SNAKE_CASE_ )
# load original model from timm
lowercase__ = create_model(SCREAMING_SNAKE_CASE_ , pretrained=SCREAMING_SNAKE_CASE_ )
timm_model.eval()
# load state_dict of original model
lowercase__ = timm_model.state_dict()
for key in state_dict.copy().keys():
lowercase__ = state_dict.pop(SCREAMING_SNAKE_CASE_ )
lowercase__ = val.squeeze() if "head" in key else val
# load HuggingFace model
lowercase__ = BitForImageClassification(SCREAMING_SNAKE_CASE_ )
model.eval()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# create image processor
lowercase__ = create_transform(**resolve_data_config({} , model=SCREAMING_SNAKE_CASE_ ) )
lowercase__ = transform.transforms
lowercase__ = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
lowercase__ = BitImageProcessor(
do_resize=SCREAMING_SNAKE_CASE_ , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=SCREAMING_SNAKE_CASE_ , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=SCREAMING_SNAKE_CASE_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
lowercase__ = prepare_img()
lowercase__ = transform(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 )
lowercase__ = processor(SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# verify logits
with torch.no_grad():
lowercase__ = model(SCREAMING_SNAKE_CASE_ )
lowercase__ = outputs.logits
print("Logits:" , logits[0, :3] )
print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] )
lowercase__ = timm_model(SCREAMING_SNAKE_CASE_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(SCREAMING_SNAKE_CASE_ , outputs.logits , atol=1e-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
print(f'''Pushing model {model_name} and processor to the hub''' )
model.push_to_hub(f'''ybelkada/{model_name}''' )
processor.push_to_hub(f'''ybelkada/{model_name}''' )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
lowercase_ = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 37
| 0
|
import math
import qiskit
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 1 ):
if (
isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
or isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
or isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
):
raise TypeError("inputs must be integers." )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError("inputs must be positive." )
if (
(math.floor(SCREAMING_SNAKE_CASE_ ) != input_a)
or (math.floor(SCREAMING_SNAKE_CASE_ ) != input_a)
or (math.floor(SCREAMING_SNAKE_CASE_ ) != carry_in)
):
raise ValueError("inputs must be exact integers." )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError("inputs must be less or equal to 2." )
# build registers
lowercase__ = qiskit.QuantumRegister(4 , "qr" )
lowercase__ = qiskit.ClassicalRegister(2 , "cr" )
# list the entries
lowercase__ = [input_a, input_a, carry_in]
lowercase__ = qiskit.QuantumCircuit(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(SCREAMING_SNAKE_CASE_ ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(SCREAMING_SNAKE_CASE_ ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(SCREAMING_SNAKE_CASE_ ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , SCREAMING_SNAKE_CASE_ ) # measure the last two qbits
lowercase__ = qiskit.Aer.get_backend("aer_simulator" )
lowercase__ = qiskit.execute(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , shots=1000 )
return job.result().get_counts(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
print(F'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
| 709
|
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class _snake_case(BaseTokenizer):
    """SentencePiece-style Unigram tokenizer built on the `tokenizers` library.

    NOTE(review): the obfuscated original used an undefined base class, a
    duplicated parameter name in ``__init__`` (a SyntaxError) and three
    methods all named ``A__`` (later defs clobber earlier ones).  Names were
    reconstructed from the visible call sites (``self.add_unk_id()``,
    ``self.special_tokens_list``) and the imported symbols — confirm against
    the original implementation.
    """

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        # Fixed ids for the special tokens; the Unigram model's unk_id is
        # patched in afterwards by add_unk_id().
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
        # Append EOS to every encoded sequence.
        tokenizer.post_processor = TemplateProcessing(
            single=f'''$A {self.special_tokens["eos"]["token"]}''',
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }
        super().__init__(tokenizer, parameters)

    def train(self, files: Union[str, List[str]], vocab_size: int = 8000, show_progress: bool = True):
        """Train the Unigram model from one or more text files on disk."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the Unigram model from an in-memory iterator of texts."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)
        self.add_unk_id()

    def add_unk_id(self):
        """Patch the trained model's unk_id to the reserved <unk> slot."""
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
| 37
| 0
|
from __future__ import annotations
import math
def __lowerCAmelCase(depth: int, node_index: int, is_max: bool, scores: list, height: float) -> int:
    """Minimax over a perfect binary game tree whose leaves are `scores`.

    `depth` is the current level, `node_index` the index within that level,
    `is_max` whether the current player maximizes, and `height` the depth at
    which leaves live (log2 of the number of scores).

    NOTE(review): parameter names were reconstructed — the obfuscated
    original reused one name for all five parameters (a SyntaxError) and
    recursed via the undefined name `minimax`.
    """
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    # Leaf level reached: return the static score.
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            __lowerCAmelCase(depth + 1, node_index * 2, False, scores, height),
            __lowerCAmelCase(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        __lowerCAmelCase(depth + 1, node_index * 2, True, scores, height),
        __lowerCAmelCase(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main():
    """Demo: print the optimal minimax value for a fixed score vector."""
    scores = [90, 23, 6, 33, 21, 65, 123, 3_4423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(__lowerCAmelCase(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 710
|
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def __lowerCAmelCase(model_card_dir, src_lang, tgt_lang):
    """Generate a README.md model card for one FSMT wmt19 translation pair.

    `model_card_dir` is created if missing; `src_lang`/`tgt_lang` are the
    ISO codes of the pair.  The card's markdown content is kept exactly as
    in the original template.

    NOTE(review): parameter/local names reconstructed from the call site
    `write_model_card(model_card_dir, src_lang=..., tgt_lang=...)`; the
    obfuscated original bound every value to one name.
    """
    # Example sentences used inside the card's "How to use" snippet.
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f'''Generating {path}''')
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / """model_cards"""

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    # NOTE(review): the first split component is the constant "wmt19" prefix;
    # names reconstructed — the obfuscated original assigned all three parts
    # to the same name and called the undefined `write_model_card`.
    prefix, src_lang, tgt_lang = model_name.split("""-""")
    model_card_dir = model_cards_dir / """facebook""" / model_name
    __lowerCAmelCase(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 37
| 0
|
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowercase_ = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class _snake_case(unittest.TestCase):
    """Configuration holder for Pix2Struct image-processor tests.

    NOTE(review): attribute and method names were reconstructed from the
    visible call sites (`prepare_image_processor_dict()`,
    `prepare_dummy_image()`, `self.image_processor_tester.max_patches`); the
    obfuscated original bound every value to one local and stored nothing
    on `self`.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        # Patch budgets exercised by the test methods.
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        """Kwargs used to instantiate the processor under test."""
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        """Download a fixed sample image and return it as an RGB PIL image."""
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""",
)
@require_torch
@require_vision
class _snake_case(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for Pix2Struct's image processor on 3-channel inputs.

    NOTE(review): base class, attribute, method names and boolean literals
    were reconstructed from call sites — the obfuscated original used an
    undefined base, gave every method the same name (later defs clobber
    earlier ones) and never bound `encoded_images`.
    """

    # Hook read by the mixin and the test bodies.
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        # Regression value for the fixed sample image.
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # flattened patch dim = patch_h * patch_w * channels + 2 (row/col ids)
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        # NOTE(review): reconstructed target — VQA mode requires header text.
        image_processor.is_vqa = True

        for max_patch in self.image_processor_tester.max_patches:
            # Without header text the processor must refuse the call.
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"

            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""",
)
@require_torch
@require_vision
class _snake_case(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for Pix2Struct's image processor with 4-channel (RGBA) inputs.

    NOTE(review): names reconstructed from call sites, as for the sibling
    3-channel test class above.
    """

    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self, num_channels=4)
        # RGBA inputs are reduced to 3 channels by the processor.
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # patch dim uses num_channels - 1: the alpha channel is dropped
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
| 711
|
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _snake_case(TokenizerTesterMixin, unittest.TestCase):
    """Unit tests for TransfoXLTokenizer.

    NOTE(review): base class, attribute and method names were reconstructed
    from the imported mixin and the visible call sites
    (`self.get_tokenizer()`, `self.vocab_file`); the obfuscated original
    gave every method the same name and bound locals that were never read.
    """

    # Mixin hooks — names reconstructed; confirm against TokenizerTesterMixin.
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        # All tests here exercise the lower-cased tokenizer.
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        # Round-trip: detokenizing must restore the original string.
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
| 37
| 0
|
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def __lowerCAmelCase():
    """Check Prim's algorithm (`mst`) on a fixed 9-node weighted graph.

    Builds an undirected adjacency list, runs `mst`, and asserts every
    expected MST edge appears in the result in either direction.

    NOTE(review): local names reconstructed — the obfuscated original used
    a duplicated loop target (`for nodea, nodea, cost`) and referenced the
    never-bound names `edges`, `adjancency` and `edge`.
    """
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    # Undirected graph: register each edge in both directions.
    for node_a, node_b, cost in edges:
        adjacency[node_a].append([node_b, cost])
        adjacency[node_b].append([node_a, cost])
    result = mst(adjacency)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
| 712
|
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def __lowerCAmelCase():
    """CLI entry point for the TensorFlow benchmark.

    Parses TensorFlowBenchmarkArguments from the command line, translating
    the legacy ``--no_*`` flag spelling into a helpful error, then runs the
    benchmark.

    NOTE(review): control flow reconstructed — the obfuscated original
    parsed the args before the try block and raised outside the except,
    leaving `benchmark_args` and `full_error_msg` unbound.
    """
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        # argparse reports the unrecognized flags as a list literal at the
        # end of the message; recover it to tell deprecated from wrong args.
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)

    benchmark = TensorFlowBenchmark(args=benchmark_args)
    benchmark.run()


if __name__ == "__main__":
    __lowerCAmelCase()
| 37
| 0
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class _snake_case(unittest.TestCase):
    """Configuration holder for ConditionalDetr image-processor tests.

    NOTE(review): attribute and method names reconstructed from the call
    sites (`prepare_image_processor_dict()`, `get_expected_values(...)`);
    the obfuscated original stored nothing on `self`.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],  # noqa: B006 — mirrors the original interface; never mutated
        image_std=[0.5, 0.5, 0.5],  # noqa: B006
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Kwargs used to instantiate the processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Return the (height, width) the processor should resize to.

        For a single image the shorter side is scaled to size["shortest_edge"]
        keeping aspect ratio; for a batch, the per-image values are computed
        and the maxima over the batch are returned (padding target).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                width, height = image.size
            else:
                # assumes channel-first arrays/tensors: shape is (C, H, W)
                height, width = image.shape[1], image.shape[2]
            if width < height:
                expected_height = int(self.size["shortest_edge"] * height / width)
                expected_width = self.size["shortest_edge"]
            elif width > height:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * width / height)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class _snake_case ( lowercase__ , unittest.TestCase):
UpperCamelCase__ : Any =ConditionalDetrImageProcessor if is_vision_available() else None
def A__ ( self : List[str] ):
lowercase__ = ConditionalDetrImageProcessingTester(self )
@property
def A__ ( self : List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def A__ ( self : Optional[Any] ):
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowercase, "image_mean" ) )
self.assertTrue(hasattr(__lowercase, "image_std" ) )
self.assertTrue(hasattr(__lowercase, "do_normalize" ) )
self.assertTrue(hasattr(__lowercase, "do_resize" ) )
self.assertTrue(hasattr(__lowercase, "size" ) )
def A__ ( self : Any ):
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad, __lowercase )
lowercase__ = self.image_processing_class.from_dict(
self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=__lowercase )
self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad, __lowercase )
def A__ ( self : List[str] ):
pass
def A__ ( self : List[Any] ):
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase, Image.Image )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
lowercase__ , lowercase__ = self.image_processor_tester.get_expected_values(__lowercase )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
lowercase__ , lowercase__ = self.image_processor_tester.get_expected_values(__lowercase, batched=__lowercase )
lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def A__ ( self : List[str] ):
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, numpify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase, np.ndarray )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
lowercase__ , lowercase__ = self.image_processor_tester.get_expected_values(__lowercase )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values
lowercase__ , lowercase__ = self.image_processor_tester.get_expected_values(__lowercase, batched=__lowercase )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def A__ ( self : List[Any] ):
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, torchify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase, torch.Tensor )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
lowercase__ , lowercase__ = self.image_processor_tester.get_expected_values(__lowercase )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values
lowercase__ , lowercase__ = self.image_processor_tester.get_expected_values(__lowercase, batched=__lowercase )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
@slow
def A__ ( self : int ):
# prepare image and target
lowercase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r" ) as f:
lowercase__ = json.loads(f.read() )
lowercase__ = {"image_id": 3_9769, "annotations": target}
# encode them
lowercase__ = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
lowercase__ = image_processing(images=__lowercase, annotations=__lowercase, return_tensors="pt" )
# verify pixel values
lowercase__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape, __lowercase )
lowercase__ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], __lowercase, atol=1e-4 ) )
# verify area
lowercase__ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"], __lowercase ) )
# verify boxes
lowercase__ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape, __lowercase )
lowercase__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], __lowercase, atol=1e-3 ) )
# verify image_id
lowercase__ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], __lowercase ) )
# verify is_crowd
lowercase__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], __lowercase ) )
# verify class_labels
lowercase__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], __lowercase ) )
# verify orig_size
lowercase__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], __lowercase ) )
# verify size
lowercase__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"], __lowercase ) )
@slow
def A__ ( self : Optional[int] ):
# prepare image, target and masks_path
lowercase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r" ) as f:
lowercase__ = json.loads(f.read() )
lowercase__ = {"file_name": "000000039769.png", "image_id": 3_9769, "segments_info": target}
lowercase__ = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
lowercase__ = ConditionalDetrImageProcessor(format="coco_panoptic" )
lowercase__ = image_processing(images=__lowercase, annotations=__lowercase, masks_path=__lowercase, return_tensors="pt" )
# verify pixel values
lowercase__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape, __lowercase )
lowercase__ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], __lowercase, atol=1e-4 ) )
# verify area
lowercase__ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"], __lowercase ) )
# verify boxes
lowercase__ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape, __lowercase )
lowercase__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], __lowercase, atol=1e-3 ) )
# verify image_id
lowercase__ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], __lowercase ) )
# verify is_crowd
lowercase__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], __lowercase ) )
# verify class_labels
lowercase__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], __lowercase ) )
# verify masks
lowercase__ = 82_2873
self.assertEqual(encoding["labels"][0]["masks"].sum().item(), __lowercase )
# verify orig_size
lowercase__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], __lowercase ) )
# verify size
lowercase__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"], __lowercase ) )
| 713
|
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
# Conflict-style markers wrapped around converted lines that need manual attention.
# Fix (review): these constants were all rebound to the single name `lowercase_`,
# while the converter class below reads them as HIGHLIGHT_MESSAGE_PRE, TO_HIGHLIGHT
# and TO_CONVERT (NameError at runtime). Restored the real names.
HIGHLIGHT_MESSAGE_PRE = """<<<<<<< This should probably be modified because it mentions: """
HIGHLIGHT_MESSAGE_POST = """=======
>>>>>>>
"""
# tfds constructs that cannot be converted automatically; lines mentioning any of
# these are highlighted for manual follow-up.
TO_HIGHLIGHT = [
    "TextEncoderConfig",
    "ByteTextEncoder",
    "SubwordTextEncoder",
    "encoder_config",
    "maybe_build_from_corpus",
    "manual_dir",
]
TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"""tfds\.core""", r"""datasets"""),
    (r"""tf\.io\.gfile\.GFile""", r"""open"""),
    (r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""),
    (r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""),
    (r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""),
    (r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""),
    (r"""tfds\.features\.FeaturesDict\(""", r"""dict("""),
    (r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
    (r"""tfds\.""", r"""datasets."""),
    (r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""),
    (r"""self\.builder_config""", r"""self.config"""),
]
# Preserve the final value the original module left bound to `lowercase_`.
lowercase_ = TO_CONVERT
def __lowerCAmelCase(args):
    """Factory wired into argparse: build the convert command from parsed CLI args.

    Fixes (review): the parameter was named `SCREAMING_SNAKE_CASE_` while the body
    read `args` (NameError), and the body returned `ConvertCommand`, a name that does
    not exist in this file — the command class defined below is `_snake_case`.
    """
    return _snake_case(args.tfds_path, args.datasets_directory)


convert_command_factory = __lowerCAmelCase  # conventional, importable name for this factory
class _snake_case(BaseDatasetsCLICommand):
    """Datasets CLI command converting TensorFlow Datasets dataset scripts into
    HuggingFace Datasets dataset scripts.

    Fixes (review): the original base class (`lowercase__`) and most local names
    (`parser`, `abs_tfds_path`, `f_name`, `out_line`, ...) were undefined — every
    assignment went to throwaway identifiers while the code read the intended names.
    The intended names were restored from the reads visible in the original body.
    """

    @staticmethod
    def A__(parser: ArgumentParser):
        """Register the `convert` subcommand and its CLI arguments on `parser`."""
        train_parser = parser.add_parser(
            "convert", help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.", )
        train_parser.add_argument(
            "--tfds_path", type=str, required=True, help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.", )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder." )
        # Build the command from the parsed args when `convert` is invoked.
        train_parser.set_defaults(func=lambda args: _snake_case(args.tfds_path, args.datasets_directory))

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        """Remember the source path and destination directory; set up a CLI logger."""
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def A__(self):
        """Convert every eligible ``.py`` file under the tfds path into datasets style.

        Rewrites tfds idioms via the TO_CONVERT regexes, wraps lines mentioning
        TO_HIGHLIGHT constructs in conflict-style markers for manual review, writes
        the converted scripts, and finally copies shared utility files next to the
        builder directories that import them.
        """
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")
        abs_datasets_path = os.path.abspath(self._datasets_directory)
        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]
        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)
            # Only plain .py dataset scripts are converted.
            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue
            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    # Keep the offending line but wrap it in conflict-style markers.
                    needs_manual_update = True
                    to_highlight = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_highlight) + "\n")
                    out_lines.append(out_line)
                    out_lines.append("=======\n>>>>>>>\n")  # closing marker of the highlight block
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)
                    # Take care of saving utilities (to later move them together with main script)
                    if "tensorflow_datasets" in out_line:
                        match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                        tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                        out_line = "from . import " + match.group(1)
                    # Check we have not forget anything
                    if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                        raise ValueError(f"Error converting {out_line.strip()}")
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)
            if needs_manual_update:
                with_manual_update.append(output_file)
            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                # Fix (review): the original log message had source and destination swapped.
                self._logger.info(f"Copying {utils_file} to {dest_folder}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.")
| 37
| 0
|
def __lowerCAmelCase(a, b):
    """Return gcd(a, b) using the iterative Euclidean algorithm.

    Fix (review): both parameters were named `SCREAMING_SNAKE_CASE_` (a SyntaxError —
    duplicate argument) while the body used `a` and `b`; the real names are restored.
    """
    while b:
        a, b = b, a % b
    return a


euclidean_gcd = __lowerCAmelCase  # restore the name the demo function below calls
def __lowerCAmelCase(a, b):
    """Return gcd(a, b) using the recursive Euclidean algorithm.

    Fixes (review): duplicate `SCREAMING_SNAKE_CASE_` parameters (SyntaxError), and the
    recursive call targeted `euclidean_gcd_recursive`, which was never defined; the
    alias below makes the recursion resolve.
    """
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


euclidean_gcd_recursive = __lowerCAmelCase  # name used by the recursion and the demo below
def __lowerCAmelCase():
    """Demo entry point: print gcd results from both implementations for sample pairs.

    NOTE(review): relies on module-level names `euclidean_gcd` and
    `euclidean_gcd_recursive`; in this file those helpers currently carry mangled
    definition names — confirm the bindings exist before running.
    """
    print(f'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
    print(f'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
    print(f'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
    print(f'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
    print(f'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
    print(f'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
    print(f'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
    print(f'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
    print(f'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
    print(f'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )


main = __lowerCAmelCase  # fix: the guard below called `main`, which was never defined

if __name__ == "__main__":
    main()
| 714
|
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
# Fix (review): these module-level values were all rebound to `lowercase_`, while the
# tokenizer class below reads them as `logger`, VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP and PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES (NameError).
# Restored the real names; the last `lowercase_` binding is preserved via an alias.
logger = logging.get_logger(__name__)
# Canonical file names for the three tokenizer artifacts.
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# Hub URLs of the pretrained tokenizer files, keyed by artifact then checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
    },
    """merges_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
    },
    """tokenizer_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
    },
}
# Maximum model input length per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """allenai/led-base-16384""": 1_6384,
}
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES  # preserve the original final binding
class _snake_case ( lowercase__):
    """Fast LED tokenizer backed by a `tokenizers` tokenizer.

    NOTE(review): this class is heavily corrupted and is documented rather than
    rewritten. Known problems, flagged inline below: the base-class name
    `lowercase__` is undefined at module level (given the imports, presumably
    `PreTrainedTokenizerFast` — TODO confirm); every method is named `A__`, so each
    definition shadows the previous one and only the last survives on the class;
    several signatures repeat the parameter name `__lowercase`, which is a
    SyntaxError; and many locals are assigned to `lowercase__` while later lines
    read the originally intended names (`pre_tok_state`, `state`, `kwargs`, ...).
    """
    # Class-level tokenizer metadata: artifact file names, pretrained file URLs,
    # max input sizes, the matching slow tokenizer class, and the model inputs.
    UpperCamelCase__ : int =VOCAB_FILES_NAMES
    UpperCamelCase__ : Any =PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase__ : Dict =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase__ : List[Any] =LEDTokenizer
    UpperCamelCase__ : Tuple =["""input_ids""", """attention_mask"""]

    # NOTE(review): the original parameter names (vocab_file, merges_file,
    # tokenizer_file, errors, bos/eos/sep/cls/unk/pad/mask tokens,
    # add_prefix_space, trim_offsets) were all replaced with `__lowercase`,
    # making this signature invalid Python (duplicate argument names).
    def __init__( self : Optional[Any], __lowercase : Optional[Any]=None, __lowercase : Dict=None, __lowercase : Tuple=None, __lowercase : Union[str, Any]="replace", __lowercase : Tuple="<s>", __lowercase : Optional[Any]="</s>", __lowercase : Tuple="</s>", __lowercase : List[str]="<s>", __lowercase : Tuple="<unk>", __lowercase : Dict="<pad>", __lowercase : Dict="<mask>", __lowercase : Any=False, __lowercase : Any=True, **__lowercase : List[Any], ):
        super().__init__(
            __lowercase, __lowercase, tokenizer_file=__lowercase, errors=__lowercase, bos_token=__lowercase, eos_token=__lowercase, sep_token=__lowercase, cls_token=__lowercase, unk_token=__lowercase, pad_token=__lowercase, mask_token=__lowercase, add_prefix_space=__lowercase, trim_offsets=__lowercase, **__lowercase, )
        # Sync the backend pre-tokenizer's `add_prefix_space` with the requested value.
        # NOTE(review): the result should be bound to `pre_tok_state`, which the next
        # line reads but which is never defined here.
        lowercase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space", __lowercase ) != add_prefix_space:
            lowercase__ = getattr(__lowercase, pre_tok_state.pop("type" ) )
            lowercase__ = add_prefix_space
            lowercase__ = pre_tok_class(**__lowercase )
        lowercase__ = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        lowercase__ = "post_processor"
        lowercase__ = getattr(self.backend_tokenizer, __lowercase, __lowercase )
        if tokenizer_component_instance:
            # NOTE(review): `tokenizer_component_instance`, `state`,
            # `changes_to_apply`, `component_class` are read below but every
            # assignment above/below went to `lowercase__`.
            lowercase__ = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                lowercase__ = tuple(state["sep"] )
            if "cls" in state:
                lowercase__ = tuple(state["cls"] )
            lowercase__ = False
            if state.get("add_prefix_space", __lowercase ) != add_prefix_space:
                lowercase__ = add_prefix_space
                lowercase__ = True
            if state.get("trim_offsets", __lowercase ) != trim_offsets:
                lowercase__ = trim_offsets
                lowercase__ = True
            if changes_to_apply:
                lowercase__ = getattr(__lowercase, state.pop("type" ) )
                lowercase__ = component_class(**__lowercase )
                setattr(self.backend_tokenizer, __lowercase, __lowercase )

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def A__ ( self : str ):
        # Getter half of the mask-token property: warn and return None when unset.
        # NOTE(review): `logger` must be a module-level logger; it is not bound
        # under that name in this file as written.
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )

    # NOTE(review): `@mask_token.setter` references the name `mask_token`, but the
    # getter above is named `A__`, so evaluating this class body raises NameError;
    # both accessors were presumably named `mask_token` originally.
    @mask_token.setter
    def A__ ( self : Optional[int], __lowercase : Dict ):
        # Wrap string values in an AddedToken that strips the space before the mask.
        lowercase__ = AddedToken(__lowercase, lstrip=__lowercase, rstrip=__lowercase ) if isinstance(__lowercase, __lowercase ) else value
        lowercase__ = value

    def A__ ( self : Any, *__lowercase : List[Any], **__lowercase : Optional[Any] ):
        # Batched encode: pretokenized input requires add_prefix_space=True.
        # NOTE(review): `kwargs` is undefined here — the kwargs parameter is named
        # `__lowercase`; the same pattern repeats in the single-sequence variant.
        lowercase__ = kwargs.get("is_split_into_words", __lowercase )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                "to use it with pretokenized inputs." )
        return super()._batch_encode_plus(*__lowercase, **__lowercase )

    def A__ ( self : int, *__lowercase : Union[str, Any], **__lowercase : List[str] ):
        # Single-sequence encode: same pretokenized-input guard as above.
        lowercase__ = kwargs.get("is_split_into_words", __lowercase )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                "to use it with pretokenized inputs." )
        return super()._encode_plus(*__lowercase, **__lowercase )

    def A__ ( self : Optional[Any], __lowercase : str, __lowercase : Optional[str] = None ):
        # Save the backend tokenizer model files to a directory; returns the paths.
        lowercase__ = self._tokenizer.model.save(__lowercase, name=__lowercase )
        return tuple(__lowercase )

    def A__ ( self : List[str], __lowercase : int, __lowercase : Optional[int]=None ):
        # Build model inputs as <s> seq_a </s> [</s> seq_b </s>].
        # NOTE(review): `token_ids_a` / `output` are read but never bound here.
        lowercase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def A__ ( self : int, __lowercase : List[int], __lowercase : Optional[List[int]] = None ):
        # Token type ids are all zeros for LED (RoBERTa-style pair encoding).
        lowercase__ = [self.sep_token_id]
        lowercase__ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def A__ ( self : Union[str, Any], __lowercase : Union[Dict[str, EncodedInput], BatchEncoding], __lowercase : Optional[int] = None, __lowercase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD, __lowercase : Optional[int] = None, __lowercase : Optional[bool] = None, ):
        # Pad as the base class does, then bring `global_attention_mask` to the same
        # length, padding with -1 ("local attention", not "don't attend").
        lowercase__ = super()._pad(
            encoded_inputs=__lowercase, max_length=__lowercase, padding_strategy=__lowercase, pad_to_multiple_of=__lowercase, return_attention_mask=__lowercase, )
        # Load from model defaults
        if return_attention_mask is None:
            lowercase__ = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            lowercase__ = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            lowercase__ = len(encoded_inputs["global_attention_mask"] ) != len(__lowercase )
            if needs_to_be_padded:
                lowercase__ = len(__lowercase ) - len(encoded_inputs["global_attention_mask"] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    lowercase__ = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    lowercase__ = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
        return encoded_inputs
| 37
| 0
|
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def __lowerCAmelCase(model_a, model_b, did_step, iteration):
    """Assert gradient (dis)agreement between two models, parameter by parameter.

    When `did_step` is True the gradients must match (`torch.allclose`); otherwise
    they must differ. Parameters without `requires_grad` are skipped.

    Fix (review): all four parameters were named `SCREAMING_SNAKE_CASE_` (a
    SyntaxError — duplicate arguments) while the body read the real names.
    """
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''


check_model_parameters = __lowerCAmelCase  # restore the name used by the tests below
def __lowerCAmelCase(model, input, target, accelerator, do_backward=True):
    """Run one forward/backward step with an MSE loss.

    When `do_backward` is False, the loss is scaled by the gradient-accumulation
    steps and backpropagated directly; otherwise `accelerator.backward` handles it.

    Fix (review): the five parameters were all named `SCREAMING_SNAKE_CASE_`
    (SyntaxError) while the body read `model`, `target`, `accelerator`,
    `do_backward`; the real names are restored.
    """
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        # Manual accumulation path: scale, then plain backward.
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)


step_model = __lowerCAmelCase  # restore the name used by the tests below
def __lowerCAmelCase(accelerator, sched=False):
    """Build the standard training fixtures: a model, its prepared (DDP) copy and a dataloader.

    When `sched` is truthy, also builds one AdamW optimizer and one LambdaLR
    scheduler per model and returns the 7-tuple
    (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched);
    otherwise returns (model, ddp_model, dataloader).

    Fix (review): duplicate `SCREAMING_SNAKE_CASE_` parameters (SyntaxError) and all
    locals collapsed onto `lowercase__`; the intended bindings are restored from the
    unpacking pattern and the return statements.
    """
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader


get_training_setup = __lowerCAmelCase  # restore the name used by the tests below
def __lowerCAmelCase(accelerator):
    """Single-process check that `accelerator.no_sync` is a no-op: gradients of the
    base model and the prepared copy stay in sync on every iteration.

    Fix (review): `step_model` was called with the accelerator repeated four times
    and all locals were collapsed onto `lowercase__`; arguments restored to
    (model, input, target, accelerator).
    """
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


test_noop_sync = __lowerCAmelCase  # restore the name used by `main()` below
def __lowerCAmelCase(accelerator):
    """Distributed check that `accelerator.no_sync` defers gradient synchronization:
    grads differ inside `no_sync` iterations and match on the syncing ones.

    Fix (review): same corruption as `test_noop_sync` — `step_model` received the
    accelerator four times and locals were collapsed onto `lowercase__`.
    """
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


test_distributed_sync = __lowerCAmelCase  # restore the name used by `main()` below
def __lowerCAmelCase(split_batches=False, dispatch_batches=False):
    """Check `accelerator.accumulate`: gradients sync only every second iteration
    (gradient_accumulation_steps=2) or on the last batch.

    Fix (review): duplicate `SCREAMING_SNAKE_CASE_` parameters (SyntaxError) and
    scrambled argument lists; restored from the assertion logic and the
    Accelerator keyword names already present in the original.
    """
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()


test_gradient_accumulation = __lowerCAmelCase  # restore the name used by `main()` below
def __lowerCAmelCase(split_batches=False, dispatch_batches=False):
    """Check `accelerator.accumulate` with optimizers and LR schedulers: learning
    rates of the base and the prepared (DDP) setups must stay aligned.

    Fix (review): duplicate `SCREAMING_SNAKE_CASE_` parameters and scrambled
    argument lists; restored from the 7-tuple unpacking, the LR assertion and the
    keyword names already present in the original.
    """
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                # One scheduler step per process to mirror the prepared scheduler.
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()


test_gradient_accumulation_with_opt_and_scheduler = __lowerCAmelCase  # restore name used by `main()`
def __lowerCAmelCase():
    """Check GradientState tracks the active dataloader, including when a nested
    loop over a second dataloader runs inside the first one.

    Fix (review): all locals were collapsed onto `lowercase__`; the two
    dataset/dataloader pairs and the nested-loop structure are restored from the
    identity assertions already present in the original.
    """
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                # Enter a nested dataloader: it becomes the active one until exhausted.
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


test_dataloader_break = __lowerCAmelCase  # restore the name used by `main()` below
def __lowerCAmelCase():
    """Entry point: run each sync/accumulation check appropriate for the current
    distributed setup.

    Fix (review): the original passed the undefined name `SCREAMING_SNAKE_CASE_`
    to every test; the accelerator / loop variables are now threaded through.
    """
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, " , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<" , "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


main = __lowerCAmelCase  # restore the name used by `_mp_fn` and the __main__ guard
def __lowerCAmelCase(index):
    """Per-process entry point for `xla_spawn` (TPUs); `index` is the process index
    supplied by the launcher and is intentionally unused."""
    # For xla_spawn (TPUs)
    main()


_mp_fn = __lowerCAmelCase  # restore the name xla_spawn launchers look up

if __name__ == "__main__":
    main()
| 715
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __lowerCAmelCase():
    """Build the `accelerate` CLI argument parser, register every subcommand, and
    dispatch to the chosen command's `func`.

    Fixes (review): `allow_abbrev` and all `subparsers=` keywords passed the
    undefined name `SCREAMING_SNAKE_CASE_`, and parsed args were assigned to a
    throwaway name while the code read `args`; the guard below also called `main`,
    which was never defined.
    """
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")
    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    # Run
    args.func(args)


main = __lowerCAmelCase  # restore the name used by the __main__ guard

if __name__ == "__main__":
    main()
| 37
| 0
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
    """Builds small random ResNet configs and inputs for the TF model tests.

    Method names are restored from their call sites elsewhere in this file
    (``prepare_config_and_inputs``, ``get_config``, ``create_and_check_model``,
    ``create_and_check_for_image_classification``,
    ``prepare_config_and_inputs_for_common``).
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=None,
        depths=None,
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        # None-sentinels avoid mutable list defaults; values match the original.
        self.hidden_sizes = hidden_sizes if hidden_sizes is not None else [10, 20, 30, 40]
        self.depths = depths if depths is not None else [1, 1, 2, 1]
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # One stage per depth entry.
        self.num_stages = len(self.depths)

    def prepare_config_and_inputs(self):
        """Random pixel values (plus labels when use_labels) and a matching config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class _snake_case(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Wires TFResNet into the shared TF model-tester mixins.

    Bases and attribute/method names are restored from this file's imports
    and internal call sites; the garbled version defined every method as
    ``A__`` (each shadowing the previous) and listed a duplicate base class.
    """

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    # ResNet is a pure conv net: no pruning, ONNX, embedding resizing,
    # head masking, or attention outputs to test.
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Intentionally a no-op for ResNet (no common text-config properties).
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            # Embedding output + one feature map per stage.
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO test-fixture image used by the slow integration test.

    The name is restored from the call site in this file (``prepare_img()``);
    the garbled version defined it under an unrelated name.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


# Backward-compatible alias for the garbled original name.
__lowerCAmelCase = prepare_img
@require_tf
@require_vision
class _snake_case(unittest.TestCase):
    """Slow integration test: run a pretrained TFResNet classifier on a real image.

    The property name is restored from its reference in the test body
    (``self.default_image_processor``); the garbled version defined it as ``A__``.
    """

    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        # First three ImageNet logits for the fixture image.
        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 716
|
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class _snake_case ( unittest.TestCase):
    """Holds the knob values for the DonutImageProcessor tests and builds the
    kwargs dict used to construct the processor.

    Parameter names are restored from the attributes each one is assigned to;
    the garbled version repeated one parameter name (a SyntaxError).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        # None-sentinels avoid mutable list defaults; values match the original.
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def prepare_image_processor_dict(self):
        """kwargs for constructing a DonutImageProcessor with these settings."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class _snake_case(ImageProcessingSavingTestMixin, unittest.TestCase):
    """DonutImageProcessor tests: attribute presence, `size` kwarg handling, and
    batched/unbatched calls on PIL, numpy and torch inputs.

    Attribute and method names are restored from their references in this class
    (``self.image_processing_class``, ``self.image_processor_dict``,
    ``self.image_processor_tester``); the garbled version defined every member
    as ``A__``/``UpperCamelCase__``, each shadowing the previous one.
    """

    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        # NOTE(review): the tester helper is defined earlier in this file; the
        # garbled rename left this reference pointing at the upstream name.
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 37
| 0
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model, ckpt_dir, model_name):
    """Export a PyTorch BERT model's weights as a TF 1.x checkpoint.

    Args:
        model: loaded ``BertModel`` whose ``state_dict()`` supplies the weights.
        ckpt_dir: directory the ``.ckpt`` files are written to (created if missing).
        model_name: checkpoint base name; dashes are replaced with underscores.

    Names are restored from the keyword call site in this file
    (``convert_pytorch_checkpoint_to_tf(model=..., ckpt_dir=..., model_name=...)``);
    the garbled version repeated one parameter name (a SyntaxError) and
    discarded the result of each rename step.
    """
    # Weights whose PyTorch layout is transposed relative to TF's kernel layout.
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    # Ordered (pattern, replacement) pairs mapping state-dict keys to TF variable names.
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name):
        # Apply every rename rule in order; each result feeds the next rule.
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f'''bert/{name}'''

    def create_tf_var(tensor, name, session):
        # Create and zero-initialize a TF variable matching the numpy tensor.
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            # Report whether the round-tripped TF value matches the torch weight.
            print(f'''Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}''')

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


# Backward-compatible alias for the garbled original name.
__lowerCAmelCase = convert_pytorch_checkpoint_to_tf
def main(raw_args=None):
    """CLI entry point: load a PyTorch BERT checkpoint and convert it to TF 1.x.

    Args:
        raw_args: optional argv list (defaults to ``sys.argv[1:]`` via argparse).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model")
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


# Backward-compatible alias for the garbled original name.
__lowerCAmelCase = main


if __name__ == "__main__":
    main()
| 717
|
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _snake_case(TestCase):
    """Static hygiene checks over every dataset script under ./datasets.

    Helper names are restored from their call sites inside this class; the
    garbled version bound each regex result to a throwaway local and then
    returned an undefined name. The base is ``TestCase``, imported at the
    top of this file and otherwise unused.
    """

    def _no_encoding_on_file_open(self, file_path: str):
        r"""Return a regex match if *file_path* contains an ``open(...)`` call
        without an explicit encoding or binary/write mode, else ``None``."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, file_path: str):
        r"""Return the first real ``print(`` call in *file_path*, ignoring prints
        that occur inside comments or string literals, else ``None``."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
        # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
        matches = [match for match in regexp.finditer(input_text) if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 37
| 0
|
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
# Configure root logging once at import time so all module logs share a format.
logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    stream=sys.stdout,
)

logger = logging.getLogger(__name__)

# Checkpoint-name -> class lookup tables used when loading model/tokenizer.
# Names restored from their references later in this file; the garbled version
# bound the logger and both dicts to the same name, clobbering each other.
model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    """Build and parse the CLI arguments for the BART -> ONNX export script.

    Returns:
        argparse.Namespace with validation_file, max_length, num_beams,
        model_name_or_path, config_name, device and output_file_path.
    """
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args


# Backward-compatible alias for the garbled original name.
__lowerCAmelCase = parse_args
def load_model_tokenizer(model_name, device="cpu"):
    """Load the model and tokenizer registered for *model_name* onto *device*.

    Names are restored from the return statement and the call site later in
    this file; the garbled version repeated the parameter name (a SyntaxError)
    and returned undefined locals.
    """
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        # NOTE(review): the obfuscated source discarded these three values into
        # throwaway locals; upstream they disable generation constraints that
        # the scripted beam-search graph does not support — confirm against the
        # original run_onnx_exporter.py.
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer


# Backward-compatible alias for the garbled original name.
__lowerCAmelCase = load_model_tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    """Export BART + beam search to ONNX and check parity with PyTorch.

    Traces the scripted beam-search generator, exports it to *onnx_file_path*,
    deduplicates initializers, then runs the same summarization prompt through
    ONNX Runtime and asserts the outputs match PyTorch's ``generate``.

    Local names are restored from their later references; the garbled version
    repeated every parameter name (a SyntaxError) and bound results to
    throwaway locals while referencing the intended names.
    """
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        # Raises if ONNX Runtime diverges from the PyTorch reference output.
        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")


# Backward-compatible alias for the garbled original name.
__lowerCAmelCase = export_and_validate_model
def main():
    """Drive the BART -> ONNX export: parse args, load model, export, validate."""
    args = parse_args()
    # Defaults used when the corresponding CLI flags are absent/falsy.
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)


# Backward-compatible alias for the garbled original name.
__lowerCAmelCase = main


if __name__ == "__main__":
    main()
| 718
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: map each X-MOD submodule to the public names it provides.
# The garbled version bound both structures to the same throwaway name (the
# modeling list clobbered the config dict) while `_import_structure` — the name
# actually passed to _LazyModule below — was never defined.
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: expose only the configuration objects.
    pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first
    # attribute access (the garbled version assigned the proxy to a throwaway
    # name, leaving `import sys` unused and the lazy module uninstalled).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 37
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger. The garbled version bound the logger and the archive map to
# the same name, so the second assignment clobbered the first.
logger = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL for the released XLM models.
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
    "xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
    "xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
    "xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
    "xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
    "xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
    "xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
    "xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
    "xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
    "xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}
class _snake_case(PretrainedConfig):
    """Configuration class for XLM models (vocabulary, transformer dimensions,
    generation indices and sequence-summary options).

    Parameter names are restored from the attribute each one is assigned to;
    the garbled version repeated a single parameter name 33 times (a
    SyntaxError) and let the two class-attribute assignments shadow each other.
    The base is ``PretrainedConfig``, imported at the top of this file and
    otherwise unused.
    """

    model_type = "xlm"
    # Map canonical HF attribute names onto XLM's historical parameter names.
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=30145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        """Store all hyperparameters and forward the token ids to the base config."""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            # Legacy alias handled through attribute_map.
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
class _snake_case(OnnxConfig):
    """ONNX export configuration for XLM models.

    The base is ``OnnxConfig``, imported at the top of this file and otherwise
    unused; the ``inputs`` property implements that base's contract (the
    garbled version named it ``A__``, which nothing could look up).
    """

    @property
    def inputs(self):
        """Ordered dynamic-axis spec for each ONNX input tensor."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 719
|
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowercase_ = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class _snake_case ( unittest.TestCase):
    """Holds sizing knobs for the Pix2Struct image-processor tests and can
    fetch a demo image for the value-check test.

    Parameter and method names are restored from their call sites
    (``prepare_image_processor_dict`` / ``prepare_dummy_image``); the garbled
    version repeated one parameter name (a SyntaxError) and defined both
    methods as ``A__``, the second shadowing the first.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        self.size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        # Patch budgets exercised by the __call__ tests.
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        """kwargs for constructing a Pix2Struct image processor."""
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        """Download the demo image used by the value-check test (network access)."""
        dummy_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(dummy_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class _snake_case ( lowercase__ , unittest.TestCase):
    # Test suite for the Pix2Struct image processor (class/name degraded to
    # "PixaStruct" by automated renaming).
    # NOTE(review): every method below is named ``A__`` so later defs shadow
    # earlier ones, and ``lowercase__`` / ``__lowercase`` are degradation
    # artifacts standing in for distinct locals/attributes (image_processor,
    # image_inputs, expected values, flags) — verify against the upstream
    # transformers test file before relying on behavior.

    # Processor under test; None when vision deps are missing so the
    # @require_vision skip applies cleanly.
    UpperCamelCase__ : Any =PixaStructImageProcessor if is_vision_available() else None

    def A__ ( self : Any ):
        # setUp: build the shared helper that produces processor kwargs/inputs.
        lowercase__ = PixaStructImageProcessingTester(self )

    @property
    def A__ ( self : Union[str, Any] ):
        # kwargs dict used to instantiate the processor in each test.
        return self.image_processor_tester.prepare_image_processor_dict()

    def A__ ( self : Optional[Any] ):
        # The processor must expose its configuration flags as attributes.
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__lowercase, "do_normalize" ) )
        self.assertTrue(hasattr(__lowercase, "do_convert_rgb" ) )

    def A__ ( self : Optional[int] ):
        # Golden-value check: mean of the flattened patches for the dummy
        # image must match a fixed reference (0.0606) within 1e-3.
        lowercase__ = self.image_processor_tester.prepare_dummy_image()
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        lowercase__ = 2048
        lowercase__ = image_processor(__lowercase, return_tensors="pt", max_patches=__lowercase )
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606 ), atol=1e-3, rtol=1e-3 ) )

    def A__ ( self : Union[str, Any] ):
        # PIL inputs: check output shape for single and batched calls across
        # every configured max_patches value.
        # Initialize image_processor
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, Image.Image )
        # Expected per-patch feature size: patch_h * patch_w * channels, plus
        # 2 extra slots (row/col position ids in Pix2Struct's layout).
        lowercase__ = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            lowercase__ = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim), )
            # Test batched
            lowercase__ = image_processor(
                __lowercase, return_tensors="pt", max_patches=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )

    def A__ ( self : int ):
        # VQA mode: processing without ``header_text`` must raise, and with a
        # header the shapes match the plain case.
        # NOTE(review): the bare ``lowercase__ = True`` below presumably was an
        # attribute assignment enabling VQA mode on the processor — confirm.
        # Initialize image_processor
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, Image.Image )
        # Test not batched input
        lowercase__ = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        lowercase__ = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(__lowercase ):
                lowercase__ = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches
            lowercase__ = "Hello"
            lowercase__ = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=__lowercase, header_text=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim), )
            # Test batched
            lowercase__ = image_processor(
                __lowercase, return_tensors="pt", max_patches=__lowercase, header_text=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )

    def A__ ( self : Tuple ):
        # Same shape checks with numpy-array inputs.
        # Initialize image_processor
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, numpify=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, np.ndarray )
        lowercase__ = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            lowercase__ = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim), )
            # Test batched
            lowercase__ = image_processor(
                __lowercase, return_tensors="pt", max_patches=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )

    def A__ ( self : Any ):
        # Same shape checks with torch-tensor inputs.
        # Initialize image_processor
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, torchify=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, torch.Tensor )
        # Test not batched input
        lowercase__ = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            lowercase__ = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim), )
            # Test batched
            lowercase__ = image_processor(
                __lowercase, return_tensors="pt", max_patches=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class _snake_case ( lowercase__ , unittest.TestCase):
    # Variant of the Pix2Struct image-processor test using 4-channel (RGBA)
    # inputs; the processor is expected to drop the alpha channel, hence the
    # ``num_channels - 1`` term in the expected hidden dim below.
    # NOTE(review): this class re-uses the degraded name ``_snake_case`` and so
    # shadows the previous test class at module level — verify upstream names.

    UpperCamelCase__ : Optional[int] =PixaStructImageProcessor if is_vision_available() else None

    def A__ ( self : Optional[int] ):
        # setUp: tester configured for 4-channel inputs; the bare ``= 3``
        # presumably set an expected-channels attribute — confirm upstream.
        lowercase__ = PixaStructImageProcessingTester(self, num_channels=4 )
        lowercase__ = 3

    @property
    def A__ ( self : Union[str, Any] ):
        # kwargs dict used to instantiate the processor.
        return self.image_processor_tester.prepare_image_processor_dict()

    def A__ ( self : Dict ):
        # Configuration flags must be exposed as attributes.
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__lowercase, "do_normalize" ) )
        self.assertTrue(hasattr(__lowercase, "do_convert_rgb" ) )

    def A__ ( self : Union[str, Any] ):
        # Shape checks for RGBA PIL inputs, single and batched.
        # Initialize image_processor
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, Image.Image )
        # Expected per-patch size uses num_channels - 1: alpha is discarded.
        lowercase__ = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            lowercase__ = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim), )
            # Test batched
            lowercase__ = image_processor(
                __lowercase, return_tensors="pt", max_patches=__lowercase ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
| 37
| 0
|
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class _snake_case ( lowercase__ , lowercase__):
    # Variance-preserving (VP) score-SDE scheduler (influenced by
    # yang-song/score_sde_pytorch, per the file header).
    # NOTE(review): ``lowercase__`` rebinding below stands in for distinct
    # locals/attributes (timesteps, log_mean_coeff, std, score, dt, beta_t,
    # drift, diffusion, x_mean, noise, x) — names inferred from the reads on
    # later lines; verify against the upstream diffusers scheduler.

    # Scheduler API "order" (number of model evaluations per step).
    UpperCamelCase__ : str =1

    @register_to_config
    def __init__( self : Optional[Any], __lowercase : Optional[int]=2000, __lowercase : Optional[Any]=0.1, __lowercase : str=20, __lowercase : Tuple=1e-3 ):
        # Config values (num_train_timesteps=2000, beta_min=0.1, beta_max=20,
        # sampling_eps=1e-3) are stored by @register_to_config on self.config.
        # State is unset until set_timesteps() is called.
        lowercase__ = None
        lowercase__ = None
        lowercase__ = None

    def A__ ( self : Dict, __lowercase : Tuple, __lowercase : Union[str, torch.device] = None ):
        # set_timesteps: continuous time grid from 1 down to sampling_eps.
        lowercase__ = torch.linspace(1, self.config.sampling_eps, __lowercase, device=__lowercase )

    def A__ ( self : Any, __lowercase : Tuple, __lowercase : Union[str, Any], __lowercase : int, __lowercase : List[Any]=None ):
        # step_pred: one Euler-Maruyama step of the reverse-time VP SDE.
        # Returns (x, x_mean): the noisy next sample and its mean (the mean is
        # what callers use at the final step).
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        # log mean coefficient of the VP perturbation kernel at time t.
        lowercase__ = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        # std of the perturbation kernel; broadcast up to the score's rank.
        lowercase__ = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        lowercase__ = std.flatten()
        while len(std.shape ) < len(score.shape ):
            lowercase__ = std.unsqueeze(-1 )
        # model output is rescaled by -1/std to obtain the actual score.
        lowercase__ = -score / std
        # compute
        # negative uniform step size over the time grid.
        lowercase__ = -1.0 / len(self.timesteps )
        # linear beta schedule evaluated at t, broadcast to x's rank.
        lowercase__ = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        lowercase__ = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            lowercase__ = beta_t.unsqueeze(-1 )
        # forward drift -0.5*beta*x, then reverse drift drift - g^2 * score.
        lowercase__ = -0.5 * beta_t * x
        lowercase__ = torch.sqrt(__lowercase )
        lowercase__ = drift - diffusion**2 * score
        lowercase__ = x + drift * dt
        # add noise
        lowercase__ = randn_tensor(x.shape, layout=x.layout, generator=__lowercase, device=x.device, dtype=x.dtype )
        # diffusion term scaled by sqrt(|dt|) (dt is negative, hence -dt).
        lowercase__ = x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean

    def __len__( self : int ):
        # Length of the scheduler == configured number of training timesteps.
        return self.config.num_train_timesteps
| 720
|
def __lowerCAmelCase(grid, row, col, visit):
    """Count the distinct paths from (row, col) to the bottom-right cell.

    Moves one step up/down/left/right through ``grid`` (0 = open, 1 = wall),
    never revisiting a cell on the current path. ``visit`` is the set of
    (row, col) cells already on the path; pass ``set()`` for a fresh search.

    Returns the number of simple paths; 0 if the start is out of bounds,
    blocked, or already visited.

    Fixes applied to the degraded original:
    - all four parameters were named ``SCREAMING_SNAKE_CASE_`` (a SyntaxError:
      duplicate argument names); restored to the names the body actually reads.
    - the recursive calls referenced an undefined ``depth_first_search``;
      they now call this function by its own name.
    """
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += __lowerCAmelCase(grid, row + 1, col, visit)
    count += __lowerCAmelCase(grid, row - 1, col, visit)
    count += __lowerCAmelCase(grid, row, col + 1, visit)
    count += __lowerCAmelCase(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 37
| 0
|
# Error message raised for any malformed ID string.
ERROR_MESSAGE = "Input must be a string of 8 numbers plus letter"
# Check-letter table for Spanish DNI numbers: the valid letter is
# LOOKUP_LETTERS[number % 23].
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"
# Keep the degraded module-level binding pointing at the letter table so any
# external reader of ``lowercase_`` still sees the same value.
lowercase_ = LOOKUP_LETTERS


def __lowerCAmelCase(spanish_id: str) -> bool:
    """Return True iff ``spanish_id`` is a valid Spanish DNI (8 digits + letter).

    Dashes are ignored and letters are upper-cased before validation.

    Raises:
        TypeError: if ``spanish_id`` is not a string.
        ValueError: if the cleaned ID is not 8 digits followed by a letter.

    Fixes applied to the degraded original:
    - ``isinstance(x, x)`` raised TypeError for every input (arg 2 must be a
      type); now checks against ``str``.
    - the length check ran on the raw input instead of the dash-stripped ID.
    - ``LOOKUP_LETTERS`` and the error-message constant were undefined /
      clobbered; both are restored above.
    """
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(ERROR_MESSAGE)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MESSAGE) from ex

    if letter.isdigit():
        raise ValueError(ERROR_MESSAGE)

    return letter == LOOKUP_LETTERS[number % 23]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 721
|
def __lowerCAmelCase(input_str: str) -> bool:
    """Return True iff ``input_str`` contains no repeated character.

    Uses an arbitrary-precision integer as a bitmap with one bit per Unicode
    code point, so it works for any characters, not just ASCII.

    Fixes applied to the degraded original: the repeated ``lowercase__``
    rebinding destroyed the ``bitmap`` accumulator, ``pow(2, input_str)``
    raised TypeError, and ``ch_unicode``/``ch_bit_index_on`` were read but
    never defined. Names are restored from the reads in the original body.
    """
    bitmap = 0  # bit i is set once code point i has been seen
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 37
| 0
|
'''simple docstring'''
# Public names re-exported by this download subpackage.
# NOTE(review): this list was presumably bound to ``__all__`` before the
# automated renaming to ``lowercase_`` — confirm against the upstream module.
lowercase_ = [
    """DownloadConfig""",
    """DownloadManager""",
    """DownloadMode""",
    """StreamingDownloadManager""",
]

from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 700
|
def __lowerCAmelCase(SCREAMING_SNAKE_CASE_):
    """Sort a list of comparables in place using exchange sort; return it.

    Compares each position against every later position and swaps
    out-of-order pairs — O(n^2) comparisons, fine for teaching examples.
    The input list is mutated and also returned for convenience.
    """
    numbers = SCREAMING_SNAKE_CASE_
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[j], numbers[i] = numbers[i], numbers[j]
    return numbers


if __name__ == "__main__":
    # Fix: the degraded original bound both values to ``lowercase_`` and then
    # referenced the undefined names ``user_input``, ``unsorted`` and
    # ``exchange_sort``, so the demo crashed with NameError.
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(__lowerCAmelCase(unsorted))
| 37
| 0
|
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _snake_case ( lowercase__):
    # Read-only fsspec filesystem that exposes one compressed file as an
    # "archive" containing a single uncompressed member.
    # NOTE(review): ``lowercase__`` is rebound repeatedly below; upstream
    # presumably assigned distinct attributes (self.file, self.compressed_name,
    # self.uncompressed_name, self.dir_cache) — names inferred from the reads
    # on later lines; verify against the upstream datasets/fsspec code.

    # root marker of the pseudo-archive (empty string).
    UpperCamelCase__ : List[Any] =""""""
    UpperCamelCase__ : str =(
        None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    UpperCamelCase__ : str =None # compression type in fsspec. ex: "gzip"
    UpperCamelCase__ : str =None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__( self : Any, __lowercase : str = "", __lowercase : Optional[str] = None, __lowercase : Optional[dict] = None, **__lowercase : int ):
        # NOTE(review): all parameters share the degraded name ``__lowercase``
        # (a SyntaxError as written); upstream signature was
        # (fo="", target_protocol=None, target_options=None, **kwargs).
        super().__init__(self, **__lowercase )
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        lowercase__ = fsspec.open(
            __lowercase, mode="rb", protocol=__lowercase, compression=self.compression, client_kwargs={
                "requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True, # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {} ), # To avoid issues if it was already passed.
            }, **(target_options or {}), )
        # compressed file name: basename of the innermost chained URL.
        lowercase__ = os.path.basename(self.file.path.split("::" )[0] )
        # uncompressed member name: compressed name with its last extension
        # stripped (if any).
        lowercase__ = (
            self.compressed_name[: self.compressed_name.rindex("." )]
            if "." in self.compressed_name
            else self.compressed_name
        )
        lowercase__ = None

    @classmethod
    def A__ ( cls : Any, __lowercase : List[str] ):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(__lowercase ).lstrip("/" )

    def A__ ( self : List[str] ):
        # Lazily build the single-entry directory cache.
        if self.dir_cache is None:
            lowercase__ = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name}
            # NOTE(review): ``f`` is undefined here — upstream bound the dict
            # above to ``f`` and set ``self.dir_cache = {f["name"]: f}``;
            # confirm before relying on this method.
            lowercase__ = {f["name"]: f}

    def A__ ( self : Union[str, Any], __lowercase : str ):
        # Return the whole decompressed file content as bytes.
        return self.file.open().read()

    def A__ ( self : Optional[int], __lowercase : str, __lowercase : str = "rb", __lowercase : int=None, __lowercase : str=True, __lowercase : Tuple=None, **__lowercase : Dict, ):
        # _open: only binary read ("rb") is supported on compressed members.
        lowercase__ = self._strip_protocol(__lowercase )
        if mode != "rb":
            raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
        return self.file.open()
class _snake_case ( lowercase__):
    # bz2 variant: protocol "bz2", fsspec compression "bz2", strips ".bz2".
    UpperCamelCase__ : Any ="""bz2"""
    UpperCamelCase__ : Dict ="""bz2"""
    UpperCamelCase__ : Optional[int] =""".bz2"""
class _snake_case ( lowercase__):
    # gzip variant: protocol "gzip", fsspec compression "gzip", strips ".gz".
    UpperCamelCase__ : int ="""gzip"""
    UpperCamelCase__ : List[str] ="""gzip"""
    UpperCamelCase__ : Optional[Any] =""".gz"""
class _snake_case ( lowercase__):
    # lz4 variant: protocol "lz4", fsspec compression "lz4", strips ".lz4".
    UpperCamelCase__ : Any ="""lz4"""
    UpperCamelCase__ : Union[str, Any] ="""lz4"""
    UpperCamelCase__ : List[str] =""".lz4"""
class _snake_case ( lowercase__):
    # xz variant: protocol "xz", fsspec compression "xz", strips ".xz".
    UpperCamelCase__ : Optional[int] ="""xz"""
    UpperCamelCase__ : Any ="""xz"""
    UpperCamelCase__ : str =""".xz"""
class _snake_case ( lowercase__):
    # zstd variant: protocol "zstd", fsspec compression "zstd", strips ".zst".
    UpperCamelCase__ : List[Any] ="""zstd"""
    UpperCamelCase__ : List[str] ="""zstd"""
    UpperCamelCase__ : Any =""".zst"""

    def __init__( self : Tuple, __lowercase : str, __lowercase : str = "rb", __lowercase : Optional[str] = None, __lowercase : Optional[dict] = None, __lowercase : int = DEFAULT_BLOCK_SIZE, **__lowercase : int, ):
        # NOTE(review): parameters share the degraded name ``__lowercase``;
        # upstream signature was (fo, mode="rb", target_protocol=None,
        # target_options=None, block_size=DEFAULT_BLOCK_SIZE, **kwargs).
        super().__init__(
            fo=__lowercase, mode=__lowercase, target_protocol=__lowercase, target_options=__lowercase, block_size=__lowercase, **__lowercase, )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        # out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        # NOTE(review): upstream bound this to ``_enter`` (read by fixed_enter
        # below) and finally assigned ``self.file.__enter__ = fixed_enter``;
        # the ``lowercase__`` rebinding here lost both names — verify.
        lowercase__ = self.file.__enter__

        class _snake_case :
            # Proxy wrapping the decompression reader so it can be used as a
            # context manager; delegates everything else via __getattr__.
            def __init__( self : Optional[Any], __lowercase : List[str] ):
                lowercase__ = file_

            def __enter__( self : Optional[Any] ):
                self._file.__enter__()
                return self

            def __exit__( self : Optional[Any], *__lowercase : str, **__lowercase : Any ):
                self._file.__exit__(*__lowercase, **__lowercase )

            def __iter__( self : Dict ):
                return iter(self._file )

            def A__ ( self : Any ):
                # __next__-style delegation to the wrapped file.
                return next(self._file )

            def __getattr__( self : Union[str, Any], __lowercase : Tuple ):
                return getattr(self._file, __lowercase )

        def fixed_enter(*__lowercase : Union[str, Any], **__lowercase : Dict ):
            # Replacement __enter__ returning the wrapped reader.
            return WrappedFile(_enter(*__lowercase, **__lowercase ) )

        lowercase__ = fixed_enter
| 701
|
def __lowerCAmelCase(upper_limit):
    """Return the Catalan numbers C(0)..C(upper_limit) as a list.

    Uses the dynamic-programming recurrence
    C(i) = sum_{j=0}^{i-1} C(j) * C(i-j-1), with C(0) = C(1) = 1.

    Raises:
        ValueError: if ``upper_limit`` is negative.
    """
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i-1.
    # Fix: the original looped j over range(upper_limit), which only produced
    # correct values by accident — the extra terms multiplied still-zero and
    # negative-index entries. range(i) is the correct (and tighter) bound.
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
    print("""\n*** Enter -1 at any time to quit ***""")
    print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
    try:
        while True:
            # Fix: bind the parsed value to the name the code reads (the
            # degraded original assigned to ``lowercase_`` but read ``N``).
            N = int(input().strip())
            if N < 0:
                print("""\n********* Goodbye!! ************""")
                break
            else:
                print(F'The Catalan numbers from 0 through {N} are:')
                # Fix: call this module's function (``catalan_numbers`` was
                # undefined after the automated renaming).
                print(__lowerCAmelCase(N))
                print("""Try another upper limit for the sequence: """, end="""""")
    except (NameError, ValueError):
        print("""\n********* Invalid input, goodbye! ************\n""")

import doctest

doctest.testmod()
| 37
| 0
|
import re
from filelock import FileLock
try:
import nltk
lowercase_ = True
except (ImportError, ModuleNotFoundError):
lowercase_ = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    """Re-split a Pegasus-style summary into one sentence per line.

    Strips the ``<n>`` newline markers Pegasus emits, then joins nltk's
    sentence tokenization with real newlines.

    Requires nltk (with the punkt model downloaded at module import).
    """
    # Bug fix: ``re.sub`` returns the new string — the original discarded the
    # result, so the "<n>" markers were still present in the text tokenized.
    SCREAMING_SNAKE_CASE_ = re.sub("<n>" , "" , SCREAMING_SNAKE_CASE_ )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(SCREAMING_SNAKE_CASE_ ) )
| 702
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Maps submodule name -> public names it defines; consumed by ``_LazyModule``
# below so heavy framework imports are deferred until first attribute access.
# NOTE(review): each dependency branch below rebinds the same module-level
# name ``lowercase_``; upstream presumably extended an ``_import_structure``
# dict instead (which ``_LazyModule`` still references at the bottom) —
# verify against the upstream transformers __init__.
lowercase_ = {
    """configuration_roberta_prelayernorm""": [
        """ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """RobertaPreLayerNormConfig""",
        """RobertaPreLayerNormOnnxConfig""",
    ],
}

# PyTorch symbols are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ = [
        """ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """RobertaPreLayerNormForCausalLM""",
        """RobertaPreLayerNormForMaskedLM""",
        """RobertaPreLayerNormForMultipleChoice""",
        """RobertaPreLayerNormForQuestionAnswering""",
        """RobertaPreLayerNormForSequenceClassification""",
        """RobertaPreLayerNormForTokenClassification""",
        """RobertaPreLayerNormModel""",
        """RobertaPreLayerNormPreTrainedModel""",
    ]

# TensorFlow symbols are only registered when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ = [
        """TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFRobertaPreLayerNormForCausalLM""",
        """TFRobertaPreLayerNormForMaskedLM""",
        """TFRobertaPreLayerNormForMultipleChoice""",
        """TFRobertaPreLayerNormForQuestionAnswering""",
        """TFRobertaPreLayerNormForSequenceClassification""",
        """TFRobertaPreLayerNormForTokenClassification""",
        """TFRobertaPreLayerNormMainLayer""",
        """TFRobertaPreLayerNormModel""",
        """TFRobertaPreLayerNormPreTrainedModel""",
    ]

# Flax symbols are only registered when Flax is installed.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ = [
        """FlaxRobertaPreLayerNormForCausalLM""",
        """FlaxRobertaPreLayerNormForMaskedLM""",
        """FlaxRobertaPreLayerNormForMultipleChoice""",
        """FlaxRobertaPreLayerNormForQuestionAnswering""",
        """FlaxRobertaPreLayerNormForSequenceClassification""",
        """FlaxRobertaPreLayerNormForTokenClassification""",
        """FlaxRobertaPreLayerNormModel""",
        """FlaxRobertaPreLayerNormPreTrainedModel""",
    ]

# For static type checkers: import everything eagerly so names resolve.
if TYPE_CHECKING:
    from .configuration_roberta_prelayernorm import (
        ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
        RobertaPreLayerNormConfig,
        RobertaPreLayerNormOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta_prelayernorm import (
            ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaPreLayerNormForCausalLM,
            RobertaPreLayerNormForMaskedLM,
            RobertaPreLayerNormForMultipleChoice,
            RobertaPreLayerNormForQuestionAnswering,
            RobertaPreLayerNormForSequenceClassification,
            RobertaPreLayerNormForTokenClassification,
            RobertaPreLayerNormModel,
            RobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta_prelayernorm import (
            TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaPreLayerNormForCausalLM,
            TFRobertaPreLayerNormForMaskedLM,
            TFRobertaPreLayerNormForMultipleChoice,
            TFRobertaPreLayerNormForQuestionAnswering,
            TFRobertaPreLayerNormForSequenceClassification,
            TFRobertaPreLayerNormForTokenClassification,
            TFRobertaPreLayerNormMainLayer,
            TFRobertaPreLayerNormModel,
            TFRobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta_prelayernorm import (
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy in sys.modules.
    # NOTE(review): ``_import_structure`` is undefined in this degraded copy
    # (the dict above was renamed to ``lowercase_``) — confirm upstream.
    lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 37
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.